]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-3.1-4.0.8-201507111211.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-3.1-4.0.8-201507111211.patch
CommitLineData
71d05bda
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 9de9813..1462492 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -3,9 +3,11 @@
6 *.bc
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -15,6 +17,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -51,14 +54,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -72,9 +78,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -83,6 +91,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -95,32 +104,40 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89 crc32table.h*
90 cscope.*
91 defkeymap.c
92+devicetable-offsets.h
93 devlist.h*
94 dnotify_test
95 docproc
96 dslm
97+dtc-lexer.lex.c
98 elf2ecoff
99 elfconfig.h*
100 evergreen_reg_safe.h
101+exception_policy.conf
102 fixdep
103 flask.h
104 fore200e_mkfirm
105@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
106 gconf
107 gconf.glade.h
108 gen-devlist
109+gen-kdb_cmds.c
110 gen_crc32table
111 gen_init_cpio
112 generated
113 genheaders
114 genksyms
115 *_gray256.c
116+hash
117+hid-example
118 hpet_example
119 hugepage-mmap
120 hugepage-shm
121@@ -148,14 +168,14 @@ int32.c
122 int4.c
123 int8.c
124 kallsyms
125-kconfig
126+kern_constants.h
127 keywords.c
128 ksym.c*
129 ksym.h*
130 kxgettext
131 lex.c
132 lex.*.c
133-linux
134+lib1funcs.S
135 logo_*.c
136 logo_*_clut224.c
137 logo_*_mono.c
138@@ -165,14 +185,15 @@ mach-types.h
139 machtypes.h
140 map
141 map_hugetlb
142-media
143 mconf
144+mdp
145 miboot*
146 mk_elfconfig
147 mkboot
148 mkbugboot
149 mkcpustr
150 mkdep
151+mkpiggy
152 mkprep
153 mkregtable
154 mktables
155@@ -188,6 +209,8 @@ oui.c*
156 page-types
157 parse.c
158 parse.h
159+parse-events*
160+pasyms.h
161 patches*
162 pca200e.bin
163 pca200e_ecd.bin2
164@@ -197,6 +220,7 @@ perf-archive
165 piggyback
166 piggy.gzip
167 piggy.S
168+pmu-*
169 pnmtologo
170 ppc_defs.h*
171 pss_boot.h
172@@ -206,7 +230,12 @@ r200_reg_safe.h
173 r300_reg_safe.h
174 r420_reg_safe.h
175 r600_reg_safe.h
176+randomize_layout_hash.h
177+randomize_layout_seed.h
178+realmode.lds
179+realmode.relocs
180 recordmcount
181+regdb.c
182 relocs
183 rlim_names.h
184 rn50_reg_safe.h
185@@ -216,8 +245,12 @@ series
186 setup
187 setup.bin
188 setup.elf
189+signing_key*
190+size_overflow_hash.h
191 sImage
192+slabinfo
193 sm_tbl*
194+sortextable
195 split-include
196 syscalltab.h
197 tables.c
198@@ -227,6 +260,7 @@ tftpboot.img
199 timeconst.h
200 times.h*
201 trix_boot.h
202+user_constants.h
203 utsrelease.h*
204 vdso-syms.lds
205 vdso.lds
206@@ -238,13 +272,17 @@ vdso32.lds
207 vdso32.so.dbg
208 vdso64.lds
209 vdso64.so.dbg
210+vdsox32.lds
211+vdsox32-syms.lds
212 version.h*
213 vmImage
214 vmlinux
215 vmlinux-*
216 vmlinux.aout
217 vmlinux.bin.all
218+vmlinux.bin.bz2
219 vmlinux.lds
220+vmlinux.relocs
221 vmlinuz
222 voffset.h
223 vsyscall.lds
224@@ -252,9 +290,12 @@ vsyscall_32.lds
225 wanxlfw.inc
226 uImage
227 unifdef
228+utsrelease.h
229 wakeup.bin
230 wakeup.elf
231 wakeup.lds
232+x509*
233 zImage*
234 zconf.hash.c
235+zconf.lex.c
236 zoffset.h
237diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
238index 74b6c6d..eac0e77 100644
239--- a/Documentation/kbuild/makefiles.txt
240+++ b/Documentation/kbuild/makefiles.txt
241@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
242 === 4 Host Program support
243 --- 4.1 Simple Host Program
244 --- 4.2 Composite Host Programs
245- --- 4.3 Using C++ for host programs
246- --- 4.4 Controlling compiler options for host programs
247- --- 4.5 When host programs are actually built
248- --- 4.6 Using hostprogs-$(CONFIG_FOO)
249+ --- 4.3 Defining shared libraries
250+ --- 4.4 Using C++ for host programs
251+ --- 4.5 Controlling compiler options for host programs
252+ --- 4.6 When host programs are actually built
253+ --- 4.7 Using hostprogs-$(CONFIG_FOO)
254
255 === 5 Kbuild clean infrastructure
256
257@@ -643,7 +644,29 @@ Both possibilities are described in the following.
258 Finally, the two .o files are linked to the executable, lxdialog.
259 Note: The syntax <executable>-y is not permitted for host-programs.
260
261---- 4.3 Using C++ for host programs
262+--- 4.3 Defining shared libraries
263+
264+ Objects with extension .so are considered shared libraries, and
265+ will be compiled as position independent objects.
266+ Kbuild provides support for shared libraries, but the usage
267+ shall be restricted.
268+ In the following example the libkconfig.so shared library is used
269+ to link the executable conf.
270+
271+ Example:
272+ #scripts/kconfig/Makefile
273+ hostprogs-y := conf
274+ conf-objs := conf.o libkconfig.so
275+ libkconfig-objs := expr.o type.o
276+
277+ Shared libraries always require a corresponding -objs line, and
278+ in the example above the shared library libkconfig is composed by
279+ the two objects expr.o and type.o.
280+ expr.o and type.o will be built as position independent code and
281+ linked as a shared library libkconfig.so. C++ is not supported for
282+ shared libraries.
283+
284+--- 4.4 Using C++ for host programs
285
286 kbuild offers support for host programs written in C++. This was
287 introduced solely to support kconfig, and is not recommended
288@@ -666,7 +689,7 @@ Both possibilities are described in the following.
289 qconf-cxxobjs := qconf.o
290 qconf-objs := check.o
291
292---- 4.4 Controlling compiler options for host programs
293+--- 4.5 Controlling compiler options for host programs
294
295 When compiling host programs, it is possible to set specific flags.
296 The programs will always be compiled utilising $(HOSTCC) passed
297@@ -694,7 +717,7 @@ Both possibilities are described in the following.
298 When linking qconf, it will be passed the extra option
299 "-L$(QTDIR)/lib".
300
301---- 4.5 When host programs are actually built
302+--- 4.6 When host programs are actually built
303
304 Kbuild will only build host-programs when they are referenced
305 as a prerequisite.
306@@ -725,7 +748,7 @@ Both possibilities are described in the following.
307 This will tell kbuild to build lxdialog even if not referenced in
308 any rule.
309
310---- 4.6 Using hostprogs-$(CONFIG_FOO)
311+--- 4.7 Using hostprogs-$(CONFIG_FOO)
312
313 A typical pattern in a Kbuild file looks like this:
314
315diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
316index 4d68ec8..9546b75 100644
317--- a/Documentation/kernel-parameters.txt
318+++ b/Documentation/kernel-parameters.txt
319@@ -1203,6 +1203,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
320 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
321 Default: 1024
322
323+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
324+ ignore grsecurity's /proc restrictions
325+
326+ grsec_sysfs_restrict= Format: 0 | 1
327+ Default: 1
328+ Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
329+
330 hashdist= [KNL,NUMA] Large hashes allocated during boot
331 are distributed across NUMA nodes. Defaults on
332 for 64-bit NUMA, off otherwise.
333@@ -2300,6 +2307,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
334 noexec=on: enable non-executable mappings (default)
335 noexec=off: disable non-executable mappings
336
337+ nopcid [X86-64]
338+ Disable PCID (Process-Context IDentifier) even if it
339+ is supported by the processor.
340+
341 nosmap [X86]
342 Disable SMAP (Supervisor Mode Access Prevention)
343 even if it is supported by processor.
344@@ -2601,6 +2612,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
345 the specified number of seconds. This is to be used if
346 your oopses keep scrolling off the screen.
347
348+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
349+ virtualization environments that don't cope well with the
350+ expand down segment used by UDEREF on X86-32 or the frequent
351+ page table updates on X86-64.
352+
353+ pax_sanitize_slab=
354+ Format: { 0 | 1 | off | fast | full }
355+ Options '0' and '1' are only provided for backward
356+ compatibility, 'off' or 'fast' should be used instead.
357+ 0|off : disable slab object sanitization
358+ 1|fast: enable slab object sanitization excluding
359+ whitelisted slabs (default)
360+ full : sanitize all slabs, even the whitelisted ones
361+
362+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
363+
364+ pax_extra_latent_entropy
365+ Enable a very simple form of latent entropy extraction
366+ from the first 4GB of memory as the bootmem allocator
367+ passes the memory pages to the buddy allocator.
368+
369+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
370+ when the processor supports PCID.
371+
372 pcbit= [HW,ISDN]
373
374 pcd. [PARIDE]
375diff --git a/Makefile b/Makefile
376index 0e315d6..68f608f 100644
377--- a/Makefile
378+++ b/Makefile
379@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
380 HOSTCC = gcc
381 HOSTCXX = g++
382 HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
383-HOSTCXXFLAGS = -O2
384+HOSTCFLAGS = -W -Wno-unused-parameter -Wno-missing-field-initializers -fno-delete-null-pointer-checks
385+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
386+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
387
388 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
389 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
390@@ -446,8 +448,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
391 # Rules shared between *config targets and build targets
392
393 # Basic helpers built in scripts/
394-PHONY += scripts_basic
395-scripts_basic:
396+PHONY += scripts_basic gcc-plugins
397+scripts_basic: gcc-plugins
398 $(Q)$(MAKE) $(build)=scripts/basic
399 $(Q)rm -f .tmp_quiet_recordmcount
400
401@@ -622,6 +624,74 @@ endif
402 # Tell gcc to never replace conditional load with a non-conditional one
403 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
404
405+ifndef DISABLE_PAX_PLUGINS
406+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
407+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
408+else
409+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
410+endif
411+ifneq ($(PLUGINCC),)
412+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
413+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
414+endif
415+ifdef CONFIG_PAX_MEMORY_STACKLEAK
416+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
417+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
418+endif
419+ifdef CONFIG_KALLOCSTAT_PLUGIN
420+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
421+endif
422+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
423+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
424+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
425+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
426+endif
427+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
428+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
429+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
430+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
431+endif
432+endif
433+ifdef CONFIG_CHECKER_PLUGIN
434+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
435+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
436+endif
437+endif
438+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
439+ifdef CONFIG_PAX_SIZE_OVERFLOW
440+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
441+endif
442+ifdef CONFIG_PAX_LATENT_ENTROPY
443+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
444+endif
445+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
446+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
447+endif
448+INITIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/initify_plugin.so -DINITIFY_PLUGIN
449+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
450+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
451+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
452+GCC_PLUGINS_CFLAGS += $(INITIFY_PLUGIN_CFLAGS)
453+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
454+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
455+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
456+ifeq ($(KBUILD_EXTMOD),)
457+gcc-plugins:
458+ $(Q)$(MAKE) $(build)=tools/gcc
459+else
460+gcc-plugins: ;
461+endif
462+else
463+gcc-plugins:
464+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
465+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
466+else
467+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
468+endif
469+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
470+endif
471+endif
472+
473 ifdef CONFIG_READABLE_ASM
474 # Disable optimizations that make assembler listings hard to read.
475 # reorder blocks reorders the control in the function
476@@ -714,7 +784,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
477 else
478 KBUILD_CFLAGS += -g
479 endif
480-KBUILD_AFLAGS += -Wa,-gdwarf-2
481+KBUILD_AFLAGS += -Wa,--gdwarf-2
482 endif
483 ifdef CONFIG_DEBUG_INFO_DWARF4
484 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
485@@ -884,7 +954,7 @@ export mod_sign_cmd
486
487
488 ifeq ($(KBUILD_EXTMOD),)
489-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
490+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
491
492 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
493 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
494@@ -934,6 +1004,8 @@ endif
495
496 # The actual objects are generated when descending,
497 # make sure no implicit rule kicks in
498+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
499+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
500 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
501
502 # Handle descending into subdirectories listed in $(vmlinux-dirs)
503@@ -943,7 +1015,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
504 # Error messages still appears in the original language
505
506 PHONY += $(vmlinux-dirs)
507-$(vmlinux-dirs): prepare scripts
508+$(vmlinux-dirs): gcc-plugins prepare scripts
509 $(Q)$(MAKE) $(build)=$@
510
511 define filechk_kernel.release
512@@ -986,10 +1058,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
513
514 archprepare: archheaders archscripts prepare1 scripts_basic
515
516+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
517+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
518 prepare0: archprepare FORCE
519 $(Q)$(MAKE) $(build)=.
520
521 # All the preparing..
522+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
523 prepare: prepare0
524
525 # Generate some files
526@@ -1103,6 +1178,8 @@ all: modules
527 # using awk while concatenating to the final file.
528
529 PHONY += modules
530+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
531+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
532 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
533 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
534 @$(kecho) ' Building modules, stage 2.';
535@@ -1118,7 +1195,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
536
537 # Target to prepare building external modules
538 PHONY += modules_prepare
539-modules_prepare: prepare scripts
540+modules_prepare: gcc-plugins prepare scripts
541
542 # Target to install modules
543 PHONY += modules_install
544@@ -1184,7 +1261,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
545 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
546 signing_key.priv signing_key.x509 x509.genkey \
547 extra_certificates signing_key.x509.keyid \
548- signing_key.x509.signer vmlinux-gdb.py
549+ signing_key.x509.signer vmlinux-gdb.py \
550+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
551+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
552+ tools/gcc/randomize_layout_seed.h
553
554 # clean - Delete most, but leave enough to build external modules
555 #
556@@ -1223,7 +1303,7 @@ distclean: mrproper
557 @find $(srctree) $(RCS_FIND_IGNORE) \
558 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
559 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
560- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
561+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
562 -type f -print | xargs rm -f
563
564
565@@ -1389,6 +1469,8 @@ PHONY += $(module-dirs) modules
566 $(module-dirs): crmodverdir $(objtree)/Module.symvers
567 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
568
569+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
570+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
571 modules: $(module-dirs)
572 @$(kecho) ' Building modules, stage 2.';
573 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
574@@ -1529,17 +1611,21 @@ else
575 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
576 endif
577
578-%.s: %.c prepare scripts FORCE
579+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
580+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
581+%.s: %.c gcc-plugins prepare scripts FORCE
582 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
583 %.i: %.c prepare scripts FORCE
584 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
585-%.o: %.c prepare scripts FORCE
586+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
587+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
588+%.o: %.c gcc-plugins prepare scripts FORCE
589 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
590 %.lst: %.c prepare scripts FORCE
591 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
592-%.s: %.S prepare scripts FORCE
593+%.s: %.S gcc-plugins prepare scripts FORCE
594 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
595-%.o: %.S prepare scripts FORCE
596+%.o: %.S gcc-plugins prepare scripts FORCE
597 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
598 %.symtypes: %.c prepare scripts FORCE
599 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
600@@ -1551,11 +1637,15 @@ endif
601 $(build)=$(build-dir)
602 # Make sure the latest headers are built for Documentation
603 Documentation/: headers_install
604-%/: prepare scripts FORCE
605+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
606+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
607+%/: gcc-plugins prepare scripts FORCE
608 $(cmd_crmodverdir)
609 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
610 $(build)=$(build-dir)
611-%.ko: prepare scripts FORCE
612+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
613+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
614+%.ko: gcc-plugins prepare scripts FORCE
615 $(cmd_crmodverdir)
616 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
617 $(build)=$(build-dir) $(@:.ko=.o)
618diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
619index 8f8eafb..3405f46 100644
620--- a/arch/alpha/include/asm/atomic.h
621+++ b/arch/alpha/include/asm/atomic.h
622@@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
623 #define atomic_dec(v) atomic_sub(1,(v))
624 #define atomic64_dec(v) atomic64_sub(1,(v))
625
626+#define atomic64_read_unchecked(v) atomic64_read(v)
627+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
628+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
629+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
630+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
631+#define atomic64_inc_unchecked(v) atomic64_inc(v)
632+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
633+#define atomic64_dec_unchecked(v) atomic64_dec(v)
634+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
635+
636 #endif /* _ALPHA_ATOMIC_H */
637diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
638index ad368a9..fbe0f25 100644
639--- a/arch/alpha/include/asm/cache.h
640+++ b/arch/alpha/include/asm/cache.h
641@@ -4,19 +4,19 @@
642 #ifndef __ARCH_ALPHA_CACHE_H
643 #define __ARCH_ALPHA_CACHE_H
644
645+#include <linux/const.h>
646
647 /* Bytes per L1 (data) cache line. */
648 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
649-# define L1_CACHE_BYTES 64
650 # define L1_CACHE_SHIFT 6
651 #else
652 /* Both EV4 and EV5 are write-through, read-allocate,
653 direct-mapped, physical.
654 */
655-# define L1_CACHE_BYTES 32
656 # define L1_CACHE_SHIFT 5
657 #endif
658
659+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
660 #define SMP_CACHE_BYTES L1_CACHE_BYTES
661
662 #endif
663diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
664index 968d999..d36b2df 100644
665--- a/arch/alpha/include/asm/elf.h
666+++ b/arch/alpha/include/asm/elf.h
667@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
668
669 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
670
671+#ifdef CONFIG_PAX_ASLR
672+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
673+
674+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
675+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
676+#endif
677+
678 /* $0 is set by ld.so to a pointer to a function which might be
679 registered using atexit. This provides a mean for the dynamic
680 linker to call DT_FINI functions for shared libraries that have
681diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
682index aab14a0..b4fa3e7 100644
683--- a/arch/alpha/include/asm/pgalloc.h
684+++ b/arch/alpha/include/asm/pgalloc.h
685@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
686 pgd_set(pgd, pmd);
687 }
688
689+static inline void
690+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
691+{
692+ pgd_populate(mm, pgd, pmd);
693+}
694+
695 extern pgd_t *pgd_alloc(struct mm_struct *mm);
696
697 static inline void
698diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
699index a9a1195..e9b8417 100644
700--- a/arch/alpha/include/asm/pgtable.h
701+++ b/arch/alpha/include/asm/pgtable.h
702@@ -101,6 +101,17 @@ struct vm_area_struct;
703 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
704 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
705 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
706+
707+#ifdef CONFIG_PAX_PAGEEXEC
708+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
709+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
710+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
711+#else
712+# define PAGE_SHARED_NOEXEC PAGE_SHARED
713+# define PAGE_COPY_NOEXEC PAGE_COPY
714+# define PAGE_READONLY_NOEXEC PAGE_READONLY
715+#endif
716+
717 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
718
719 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
720diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
721index 2fd00b7..cfd5069 100644
722--- a/arch/alpha/kernel/module.c
723+++ b/arch/alpha/kernel/module.c
724@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
725
726 /* The small sections were sorted to the end of the segment.
727 The following should definitely cover them. */
728- gp = (u64)me->module_core + me->core_size - 0x8000;
729+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
730 got = sechdrs[me->arch.gotsecindex].sh_addr;
731
732 for (i = 0; i < n; i++) {
733diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
734index e51f578..16c64a3 100644
735--- a/arch/alpha/kernel/osf_sys.c
736+++ b/arch/alpha/kernel/osf_sys.c
737@@ -1296,10 +1296,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
738 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
739
740 static unsigned long
741-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
742- unsigned long limit)
743+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
744+ unsigned long limit, unsigned long flags)
745 {
746 struct vm_unmapped_area_info info;
747+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
748
749 info.flags = 0;
750 info.length = len;
751@@ -1307,6 +1308,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
752 info.high_limit = limit;
753 info.align_mask = 0;
754 info.align_offset = 0;
755+ info.threadstack_offset = offset;
756 return vm_unmapped_area(&info);
757 }
758
759@@ -1339,20 +1341,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
760 merely specific addresses, but regions of memory -- perhaps
761 this feature should be incorporated into all ports? */
762
763+#ifdef CONFIG_PAX_RANDMMAP
764+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
765+#endif
766+
767 if (addr) {
768- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
769+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
770 if (addr != (unsigned long) -ENOMEM)
771 return addr;
772 }
773
774 /* Next, try allocating at TASK_UNMAPPED_BASE. */
775- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
776- len, limit);
777+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
778+
779 if (addr != (unsigned long) -ENOMEM)
780 return addr;
781
782 /* Finally, try allocating in low memory. */
783- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
784+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
785
786 return addr;
787 }
788diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
789index 9d0ac09..479a962 100644
790--- a/arch/alpha/mm/fault.c
791+++ b/arch/alpha/mm/fault.c
792@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
793 __reload_thread(pcb);
794 }
795
796+#ifdef CONFIG_PAX_PAGEEXEC
797+/*
798+ * PaX: decide what to do with offenders (regs->pc = fault address)
799+ *
800+ * returns 1 when task should be killed
801+ * 2 when patched PLT trampoline was detected
802+ * 3 when unpatched PLT trampoline was detected
803+ */
804+static int pax_handle_fetch_fault(struct pt_regs *regs)
805+{
806+
807+#ifdef CONFIG_PAX_EMUPLT
808+ int err;
809+
810+ do { /* PaX: patched PLT emulation #1 */
811+ unsigned int ldah, ldq, jmp;
812+
813+ err = get_user(ldah, (unsigned int *)regs->pc);
814+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
815+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
816+
817+ if (err)
818+ break;
819+
820+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
821+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
822+ jmp == 0x6BFB0000U)
823+ {
824+ unsigned long r27, addr;
825+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
826+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
827+
828+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
829+ err = get_user(r27, (unsigned long *)addr);
830+ if (err)
831+ break;
832+
833+ regs->r27 = r27;
834+ regs->pc = r27;
835+ return 2;
836+ }
837+ } while (0);
838+
839+ do { /* PaX: patched PLT emulation #2 */
840+ unsigned int ldah, lda, br;
841+
842+ err = get_user(ldah, (unsigned int *)regs->pc);
843+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
844+ err |= get_user(br, (unsigned int *)(regs->pc+8));
845+
846+ if (err)
847+ break;
848+
849+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
850+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
851+ (br & 0xFFE00000U) == 0xC3E00000U)
852+ {
853+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
854+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
855+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
856+
857+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
858+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
859+ return 2;
860+ }
861+ } while (0);
862+
863+ do { /* PaX: unpatched PLT emulation */
864+ unsigned int br;
865+
866+ err = get_user(br, (unsigned int *)regs->pc);
867+
868+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
869+ unsigned int br2, ldq, nop, jmp;
870+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
871+
872+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
873+ err = get_user(br2, (unsigned int *)addr);
874+ err |= get_user(ldq, (unsigned int *)(addr+4));
875+ err |= get_user(nop, (unsigned int *)(addr+8));
876+ err |= get_user(jmp, (unsigned int *)(addr+12));
877+ err |= get_user(resolver, (unsigned long *)(addr+16));
878+
879+ if (err)
880+ break;
881+
882+ if (br2 == 0xC3600000U &&
883+ ldq == 0xA77B000CU &&
884+ nop == 0x47FF041FU &&
885+ jmp == 0x6B7B0000U)
886+ {
887+ regs->r28 = regs->pc+4;
888+ regs->r27 = addr+16;
889+ regs->pc = resolver;
890+ return 3;
891+ }
892+ }
893+ } while (0);
894+#endif
895+
896+ return 1;
897+}
898+
899+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
900+{
901+ unsigned long i;
902+
903+ printk(KERN_ERR "PAX: bytes at PC: ");
904+ for (i = 0; i < 5; i++) {
905+ unsigned int c;
906+ if (get_user(c, (unsigned int *)pc+i))
907+ printk(KERN_CONT "???????? ");
908+ else
909+ printk(KERN_CONT "%08x ", c);
910+ }
911+ printk("\n");
912+}
913+#endif
914
915 /*
916 * This routine handles page faults. It determines the address,
917@@ -133,8 +251,29 @@ retry:
918 good_area:
919 si_code = SEGV_ACCERR;
920 if (cause < 0) {
921- if (!(vma->vm_flags & VM_EXEC))
922+ if (!(vma->vm_flags & VM_EXEC)) {
923+
924+#ifdef CONFIG_PAX_PAGEEXEC
925+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
926+ goto bad_area;
927+
928+ up_read(&mm->mmap_sem);
929+ switch (pax_handle_fetch_fault(regs)) {
930+
931+#ifdef CONFIG_PAX_EMUPLT
932+ case 2:
933+ case 3:
934+ return;
935+#endif
936+
937+ }
938+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
939+ do_group_exit(SIGKILL);
940+#else
941 goto bad_area;
942+#endif
943+
944+ }
945 } else if (!cause) {
946 /* Allow reads even for write-only mappings */
947 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
948diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
949index cf4c0c9..a87ecf5 100644
950--- a/arch/arm/Kconfig
951+++ b/arch/arm/Kconfig
952@@ -1735,7 +1735,7 @@ config ALIGNMENT_TRAP
953
954 config UACCESS_WITH_MEMCPY
955 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
956- depends on MMU
957+ depends on MMU && !PAX_MEMORY_UDEREF
958 default y if CPU_FEROCEON
959 help
960 Implement faster copy_to_user and clear_user methods for CPU
961@@ -1999,6 +1999,7 @@ config XIP_PHYS_ADDR
962 config KEXEC
963 bool "Kexec system call (EXPERIMENTAL)"
964 depends on (!SMP || PM_SLEEP_SMP)
965+ depends on !GRKERNSEC_KMEM
966 help
967 kexec is a system call that implements the ability to shutdown your
968 current kernel, and to start another kernel. It is like a reboot
969diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
970index e22c119..abe7041 100644
971--- a/arch/arm/include/asm/atomic.h
972+++ b/arch/arm/include/asm/atomic.h
973@@ -18,17 +18,41 @@
974 #include <asm/barrier.h>
975 #include <asm/cmpxchg.h>
976
977+#ifdef CONFIG_GENERIC_ATOMIC64
978+#include <asm-generic/atomic64.h>
979+#endif
980+
981 #define ATOMIC_INIT(i) { (i) }
982
983 #ifdef __KERNEL__
984
985+#ifdef CONFIG_THUMB2_KERNEL
986+#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
987+#else
988+#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
989+#endif
990+
991+#define _ASM_EXTABLE(from, to) \
992+" .pushsection __ex_table,\"a\"\n"\
993+" .align 3\n" \
994+" .long " #from ", " #to"\n" \
995+" .popsection"
996+
997 /*
998 * On ARM, ordinary assignment (str instruction) doesn't clear the local
999 * strex/ldrex monitor on some implementations. The reason we can use it for
1000 * atomic_set() is the clrex or dummy strex done on every exception return.
1001 */
1002 #define atomic_read(v) ACCESS_ONCE((v)->counter)
1003+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
1004+{
1005+ return ACCESS_ONCE(v->counter);
1006+}
1007 #define atomic_set(v,i) (((v)->counter) = (i))
1008+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
1009+{
1010+ v->counter = i;
1011+}
1012
1013 #if __LINUX_ARM_ARCH__ >= 6
1014
1015@@ -38,26 +62,50 @@
1016 * to ensure that the update happens.
1017 */
1018
1019-#define ATOMIC_OP(op, c_op, asm_op) \
1020-static inline void atomic_##op(int i, atomic_t *v) \
1021+#ifdef CONFIG_PAX_REFCOUNT
1022+#define __OVERFLOW_POST \
1023+ " bvc 3f\n" \
1024+ "2: " REFCOUNT_TRAP_INSN "\n"\
1025+ "3:\n"
1026+#define __OVERFLOW_POST_RETURN \
1027+ " bvc 3f\n" \
1028+" mov %0, %1\n" \
1029+ "2: " REFCOUNT_TRAP_INSN "\n"\
1030+ "3:\n"
1031+#define __OVERFLOW_EXTABLE \
1032+ "4:\n" \
1033+ _ASM_EXTABLE(2b, 4b)
1034+#else
1035+#define __OVERFLOW_POST
1036+#define __OVERFLOW_POST_RETURN
1037+#define __OVERFLOW_EXTABLE
1038+#endif
1039+
1040+#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable) \
1041+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1042 { \
1043 unsigned long tmp; \
1044 int result; \
1045 \
1046 prefetchw(&v->counter); \
1047- __asm__ __volatile__("@ atomic_" #op "\n" \
1048+ __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
1049 "1: ldrex %0, [%3]\n" \
1050 " " #asm_op " %0, %0, %4\n" \
1051+ post_op \
1052 " strex %1, %0, [%3]\n" \
1053 " teq %1, #0\n" \
1054-" bne 1b" \
1055+" bne 1b\n" \
1056+ extable \
1057 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1058 : "r" (&v->counter), "Ir" (i) \
1059 : "cc"); \
1060 } \
1061
1062-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1063-static inline int atomic_##op##_return(int i, atomic_t *v) \
1064+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, _unchecked, c_op, asm_op, , )\
1065+ __ATOMIC_OP(op, , c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1066+
1067+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
1068+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1069 { \
1070 unsigned long tmp; \
1071 int result; \
1072@@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1073 smp_mb(); \
1074 prefetchw(&v->counter); \
1075 \
1076- __asm__ __volatile__("@ atomic_" #op "_return\n" \
1077+ __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
1078 "1: ldrex %0, [%3]\n" \
1079 " " #asm_op " %0, %0, %4\n" \
1080+ post_op \
1081 " strex %1, %0, [%3]\n" \
1082 " teq %1, #0\n" \
1083-" bne 1b" \
1084+" bne 1b\n" \
1085+ extable \
1086 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1087 : "r" (&v->counter), "Ir" (i) \
1088 : "cc"); \
1089@@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1090 return result; \
1091 }
1092
1093+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op, , )\
1094+ __ATOMIC_OP_RETURN(op, , c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1095+
1096 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1097 {
1098 int oldval;
1099@@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1100 __asm__ __volatile__ ("@ atomic_add_unless\n"
1101 "1: ldrex %0, [%4]\n"
1102 " teq %0, %5\n"
1103-" beq 2f\n"
1104-" add %1, %0, %6\n"
1105+" beq 4f\n"
1106+" adds %1, %0, %6\n"
1107+
1108+#ifdef CONFIG_PAX_REFCOUNT
1109+" bvc 3f\n"
1110+"2: " REFCOUNT_TRAP_INSN "\n"
1111+"3:\n"
1112+#endif
1113+
1114 " strex %2, %1, [%4]\n"
1115 " teq %2, #0\n"
1116 " bne 1b\n"
1117-"2:"
1118+"4:"
1119+
1120+#ifdef CONFIG_PAX_REFCOUNT
1121+ _ASM_EXTABLE(2b, 4b)
1122+#endif
1123+
1124 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1125 : "r" (&v->counter), "r" (u), "r" (a)
1126 : "cc");
1127@@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1128 return oldval;
1129 }
1130
1131+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1132+{
1133+ unsigned long oldval, res;
1134+
1135+ smp_mb();
1136+
1137+ do {
1138+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1139+ "ldrex %1, [%3]\n"
1140+ "mov %0, #0\n"
1141+ "teq %1, %4\n"
1142+ "strexeq %0, %5, [%3]\n"
1143+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1144+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1145+ : "cc");
1146+ } while (res);
1147+
1148+ smp_mb();
1149+
1150+ return oldval;
1151+}
1152+
1153 #else /* ARM_ARCH_6 */
1154
1155 #ifdef CONFIG_SMP
1156 #error SMP not supported on pre-ARMv6 CPUs
1157 #endif
1158
1159-#define ATOMIC_OP(op, c_op, asm_op) \
1160-static inline void atomic_##op(int i, atomic_t *v) \
1161+#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
1162+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1163 { \
1164 unsigned long flags; \
1165 \
1166@@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
1167 raw_local_irq_restore(flags); \
1168 } \
1169
1170-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1171-static inline int atomic_##op##_return(int i, atomic_t *v) \
1172+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
1173+ __ATOMIC_OP(op, _unchecked, c_op, asm_op)
1174+
1175+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
1176+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1177 { \
1178 unsigned long flags; \
1179 int val; \
1180@@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1181 return val; \
1182 }
1183
1184+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
1185+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
1186+
1187 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1188 {
1189 int ret;
1190@@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1191 return ret;
1192 }
1193
1194+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1195+{
1196+ return atomic_cmpxchg((atomic_t *)v, old, new);
1197+}
1198+
1199 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1200 {
1201 int c, old;
1202@@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub)
1203
1204 #undef ATOMIC_OPS
1205 #undef ATOMIC_OP_RETURN
1206+#undef __ATOMIC_OP_RETURN
1207 #undef ATOMIC_OP
1208+#undef __ATOMIC_OP
1209
1210 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1211+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1212+{
1213+ return xchg(&v->counter, new);
1214+}
1215
1216 #define atomic_inc(v) atomic_add(1, v)
1217+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1218+{
1219+ atomic_add_unchecked(1, v);
1220+}
1221 #define atomic_dec(v) atomic_sub(1, v)
1222+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1223+{
1224+ atomic_sub_unchecked(1, v);
1225+}
1226
1227 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1228+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1229+{
1230+ return atomic_add_return_unchecked(1, v) == 0;
1231+}
1232 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1233 #define atomic_inc_return(v) (atomic_add_return(1, v))
1234+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1235+{
1236+ return atomic_add_return_unchecked(1, v);
1237+}
1238 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1239 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1240
1241@@ -216,6 +336,14 @@ typedef struct {
1242 long long counter;
1243 } atomic64_t;
1244
1245+#ifdef CONFIG_PAX_REFCOUNT
1246+typedef struct {
1247+ long long counter;
1248+} atomic64_unchecked_t;
1249+#else
1250+typedef atomic64_t atomic64_unchecked_t;
1251+#endif
1252+
1253 #define ATOMIC64_INIT(i) { (i) }
1254
1255 #ifdef CONFIG_ARM_LPAE
1256@@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1257 return result;
1258 }
1259
1260+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1261+{
1262+ long long result;
1263+
1264+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1265+" ldrd %0, %H0, [%1]"
1266+ : "=&r" (result)
1267+ : "r" (&v->counter), "Qo" (v->counter)
1268+ );
1269+
1270+ return result;
1271+}
1272+
1273 static inline void atomic64_set(atomic64_t *v, long long i)
1274 {
1275 __asm__ __volatile__("@ atomic64_set\n"
1276@@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1277 : "r" (&v->counter), "r" (i)
1278 );
1279 }
1280+
1281+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1282+{
1283+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1284+" strd %2, %H2, [%1]"
1285+ : "=Qo" (v->counter)
1286+ : "r" (&v->counter), "r" (i)
1287+ );
1288+}
1289 #else
1290 static inline long long atomic64_read(const atomic64_t *v)
1291 {
1292@@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1293 return result;
1294 }
1295
1296+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1297+{
1298+ long long result;
1299+
1300+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1301+" ldrexd %0, %H0, [%1]"
1302+ : "=&r" (result)
1303+ : "r" (&v->counter), "Qo" (v->counter)
1304+ );
1305+
1306+ return result;
1307+}
1308+
1309 static inline void atomic64_set(atomic64_t *v, long long i)
1310 {
1311 long long tmp;
1312@@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1313 : "r" (&v->counter), "r" (i)
1314 : "cc");
1315 }
1316+
1317+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1318+{
1319+ long long tmp;
1320+
1321+ prefetchw(&v->counter);
1322+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1323+"1: ldrexd %0, %H0, [%2]\n"
1324+" strexd %0, %3, %H3, [%2]\n"
1325+" teq %0, #0\n"
1326+" bne 1b"
1327+ : "=&r" (tmp), "=Qo" (v->counter)
1328+ : "r" (&v->counter), "r" (i)
1329+ : "cc");
1330+}
1331 #endif
1332
1333-#define ATOMIC64_OP(op, op1, op2) \
1334-static inline void atomic64_##op(long long i, atomic64_t *v) \
1335+#undef __OVERFLOW_POST_RETURN
1336+#define __OVERFLOW_POST_RETURN \
1337+ " bvc 3f\n" \
1338+" mov %0, %1\n" \
1339+" mov %H0, %H1\n" \
1340+ "2: " REFCOUNT_TRAP_INSN "\n"\
1341+ "3:\n"
1342+
1343+#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable) \
1344+static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
1345 { \
1346 long long result; \
1347 unsigned long tmp; \
1348 \
1349 prefetchw(&v->counter); \
1350- __asm__ __volatile__("@ atomic64_" #op "\n" \
1351+ __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
1352 "1: ldrexd %0, %H0, [%3]\n" \
1353 " " #op1 " %Q0, %Q0, %Q4\n" \
1354 " " #op2 " %R0, %R0, %R4\n" \
1355+ post_op \
1356 " strexd %1, %0, %H0, [%3]\n" \
1357 " teq %1, #0\n" \
1358-" bne 1b" \
1359+" bne 1b\n" \
1360+ extable \
1361 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1362 : "r" (&v->counter), "r" (i) \
1363 : "cc"); \
1364 } \
1365
1366-#define ATOMIC64_OP_RETURN(op, op1, op2) \
1367-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1368+#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, _unchecked, op1, op2, , ) \
1369+ __ATOMIC64_OP(op, , op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1370+
1371+#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \
1372+static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \
1373 { \
1374 long long result; \
1375 unsigned long tmp; \
1376@@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1377 smp_mb(); \
1378 prefetchw(&v->counter); \
1379 \
1380- __asm__ __volatile__("@ atomic64_" #op "_return\n" \
1381+ __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
1382 "1: ldrexd %0, %H0, [%3]\n" \
1383 " " #op1 " %Q0, %Q0, %Q4\n" \
1384 " " #op2 " %R0, %R0, %R4\n" \
1385+ post_op \
1386 " strexd %1, %0, %H0, [%3]\n" \
1387 " teq %1, #0\n" \
1388-" bne 1b" \
1389+" bne 1b\n" \
1390+ extable \
1391 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1392 : "r" (&v->counter), "r" (i) \
1393 : "cc"); \
1394@@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1395 return result; \
1396 }
1397
1398+#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2, , ) \
1399+ __ATOMIC64_OP_RETURN(op, , op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1400+
1401 #define ATOMIC64_OPS(op, op1, op2) \
1402 ATOMIC64_OP(op, op1, op2) \
1403 ATOMIC64_OP_RETURN(op, op1, op2)
1404@@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc)
1405
1406 #undef ATOMIC64_OPS
1407 #undef ATOMIC64_OP_RETURN
1408+#undef __ATOMIC64_OP_RETURN
1409 #undef ATOMIC64_OP
1410+#undef __ATOMIC64_OP
1411+#undef __OVERFLOW_EXTABLE
1412+#undef __OVERFLOW_POST_RETURN
1413+#undef __OVERFLOW_POST
1414
1415 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1416 long long new)
1417@@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1418 return oldval;
1419 }
1420
1421+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1422+ long long new)
1423+{
1424+ long long oldval;
1425+ unsigned long res;
1426+
1427+ smp_mb();
1428+
1429+ do {
1430+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1431+ "ldrexd %1, %H1, [%3]\n"
1432+ "mov %0, #0\n"
1433+ "teq %1, %4\n"
1434+ "teqeq %H1, %H4\n"
1435+ "strexdeq %0, %5, %H5, [%3]"
1436+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1437+ : "r" (&ptr->counter), "r" (old), "r" (new)
1438+ : "cc");
1439+ } while (res);
1440+
1441+ smp_mb();
1442+
1443+ return oldval;
1444+}
1445+
1446 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1447 {
1448 long long result;
1449@@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1450 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1451 {
1452 long long result;
1453- unsigned long tmp;
1454+ u64 tmp;
1455
1456 smp_mb();
1457 prefetchw(&v->counter);
1458
1459 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1460-"1: ldrexd %0, %H0, [%3]\n"
1461-" subs %Q0, %Q0, #1\n"
1462-" sbc %R0, %R0, #0\n"
1463+"1: ldrexd %1, %H1, [%3]\n"
1464+" subs %Q0, %Q1, #1\n"
1465+" sbcs %R0, %R1, #0\n"
1466+
1467+#ifdef CONFIG_PAX_REFCOUNT
1468+" bvc 3f\n"
1469+" mov %Q0, %Q1\n"
1470+" mov %R0, %R1\n"
1471+"2: " REFCOUNT_TRAP_INSN "\n"
1472+"3:\n"
1473+#endif
1474+
1475 " teq %R0, #0\n"
1476-" bmi 2f\n"
1477+" bmi 4f\n"
1478 " strexd %1, %0, %H0, [%3]\n"
1479 " teq %1, #0\n"
1480 " bne 1b\n"
1481-"2:"
1482+"4:\n"
1483+
1484+#ifdef CONFIG_PAX_REFCOUNT
1485+ _ASM_EXTABLE(2b, 4b)
1486+#endif
1487+
1488 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1489 : "r" (&v->counter)
1490 : "cc");
1491@@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1492 " teq %0, %5\n"
1493 " teqeq %H0, %H5\n"
1494 " moveq %1, #0\n"
1495-" beq 2f\n"
1496+" beq 4f\n"
1497 " adds %Q0, %Q0, %Q6\n"
1498-" adc %R0, %R0, %R6\n"
1499+" adcs %R0, %R0, %R6\n"
1500+
1501+#ifdef CONFIG_PAX_REFCOUNT
1502+" bvc 3f\n"
1503+"2: " REFCOUNT_TRAP_INSN "\n"
1504+"3:\n"
1505+#endif
1506+
1507 " strexd %2, %0, %H0, [%4]\n"
1508 " teq %2, #0\n"
1509 " bne 1b\n"
1510-"2:"
1511+"4:\n"
1512+
1513+#ifdef CONFIG_PAX_REFCOUNT
1514+ _ASM_EXTABLE(2b, 4b)
1515+#endif
1516+
1517 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1518 : "r" (&v->counter), "r" (u), "r" (a)
1519 : "cc");
1520@@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1521
1522 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1523 #define atomic64_inc(v) atomic64_add(1LL, (v))
1524+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1525 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1526+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1527 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1528 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1529 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1530+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1531 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1532 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1533 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1534diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
1535index d2f81e6..3c4dba5 100644
1536--- a/arch/arm/include/asm/barrier.h
1537+++ b/arch/arm/include/asm/barrier.h
1538@@ -67,7 +67,7 @@
1539 do { \
1540 compiletime_assert_atomic_type(*p); \
1541 smp_mb(); \
1542- ACCESS_ONCE(*p) = (v); \
1543+ ACCESS_ONCE_RW(*p) = (v); \
1544 } while (0)
1545
1546 #define smp_load_acquire(p) \
1547diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1548index 75fe66b..ba3dee4 100644
1549--- a/arch/arm/include/asm/cache.h
1550+++ b/arch/arm/include/asm/cache.h
1551@@ -4,8 +4,10 @@
1552 #ifndef __ASMARM_CACHE_H
1553 #define __ASMARM_CACHE_H
1554
1555+#include <linux/const.h>
1556+
1557 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1558-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1559+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1560
1561 /*
1562 * Memory returned by kmalloc() may be used for DMA, so we must make
1563@@ -24,5 +26,6 @@
1564 #endif
1565
1566 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1567+#define __read_only __attribute__ ((__section__(".data..read_only")))
1568
1569 #endif
1570diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1571index 2d46862..a35415b 100644
1572--- a/arch/arm/include/asm/cacheflush.h
1573+++ b/arch/arm/include/asm/cacheflush.h
1574@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1575 void (*dma_unmap_area)(const void *, size_t, int);
1576
1577 void (*dma_flush_range)(const void *, const void *);
1578-};
1579+} __no_const;
1580
1581 /*
1582 * Select the calling method
1583diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1584index 5233151..87a71fa 100644
1585--- a/arch/arm/include/asm/checksum.h
1586+++ b/arch/arm/include/asm/checksum.h
1587@@ -37,7 +37,19 @@ __wsum
1588 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1589
1590 __wsum
1591-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1592+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1593+
1594+static inline __wsum
1595+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1596+{
1597+ __wsum ret;
1598+ pax_open_userland();
1599+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1600+ pax_close_userland();
1601+ return ret;
1602+}
1603+
1604+
1605
1606 /*
1607 * Fold a partial checksum without adding pseudo headers
1608diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1609index abb2c37..96db950 100644
1610--- a/arch/arm/include/asm/cmpxchg.h
1611+++ b/arch/arm/include/asm/cmpxchg.h
1612@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1613
1614 #define xchg(ptr,x) \
1615 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1616+#define xchg_unchecked(ptr,x) \
1617+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1618
1619 #include <asm-generic/cmpxchg-local.h>
1620
1621diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1622index 6ddbe44..b5e38b1a 100644
1623--- a/arch/arm/include/asm/domain.h
1624+++ b/arch/arm/include/asm/domain.h
1625@@ -48,18 +48,37 @@
1626 * Domain types
1627 */
1628 #define DOMAIN_NOACCESS 0
1629-#define DOMAIN_CLIENT 1
1630 #ifdef CONFIG_CPU_USE_DOMAINS
1631+#define DOMAIN_USERCLIENT 1
1632+#define DOMAIN_KERNELCLIENT 1
1633 #define DOMAIN_MANAGER 3
1634+#define DOMAIN_VECTORS DOMAIN_USER
1635 #else
1636+
1637+#ifdef CONFIG_PAX_KERNEXEC
1638 #define DOMAIN_MANAGER 1
1639+#define DOMAIN_KERNEXEC 3
1640+#else
1641+#define DOMAIN_MANAGER 1
1642+#endif
1643+
1644+#ifdef CONFIG_PAX_MEMORY_UDEREF
1645+#define DOMAIN_USERCLIENT 0
1646+#define DOMAIN_UDEREF 1
1647+#define DOMAIN_VECTORS DOMAIN_KERNEL
1648+#else
1649+#define DOMAIN_USERCLIENT 1
1650+#define DOMAIN_VECTORS DOMAIN_USER
1651+#endif
1652+#define DOMAIN_KERNELCLIENT 1
1653+
1654 #endif
1655
1656 #define domain_val(dom,type) ((type) << (2*(dom)))
1657
1658 #ifndef __ASSEMBLY__
1659
1660-#ifdef CONFIG_CPU_USE_DOMAINS
1661+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1662 static inline void set_domain(unsigned val)
1663 {
1664 asm volatile(
1665@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1666 isb();
1667 }
1668
1669-#define modify_domain(dom,type) \
1670- do { \
1671- struct thread_info *thread = current_thread_info(); \
1672- unsigned int domain = thread->cpu_domain; \
1673- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1674- thread->cpu_domain = domain | domain_val(dom, type); \
1675- set_domain(thread->cpu_domain); \
1676- } while (0)
1677-
1678+extern void modify_domain(unsigned int dom, unsigned int type);
1679 #else
1680 static inline void set_domain(unsigned val) { }
1681 static inline void modify_domain(unsigned dom, unsigned type) { }
1682diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1683index 674d03f..9a0bac0 100644
1684--- a/arch/arm/include/asm/elf.h
1685+++ b/arch/arm/include/asm/elf.h
1686@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1687 the loader. We need to make sure that it is out of the way of the program
1688 that it will "exec", and that there is sufficient room for the brk. */
1689
1690-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1691+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1692+
1693+#ifdef CONFIG_PAX_ASLR
1694+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1695+
1696+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1697+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1698+#endif
1699
1700 /* When the program starts, a1 contains a pointer to a function to be
1701 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1702@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1703 extern void elf_set_personality(const struct elf32_hdr *);
1704 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1705
1706-struct mm_struct;
1707-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1708-#define arch_randomize_brk arch_randomize_brk
1709-
1710 #ifdef CONFIG_MMU
1711 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1712 struct linux_binprm;
1713diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1714index de53547..52b9a28 100644
1715--- a/arch/arm/include/asm/fncpy.h
1716+++ b/arch/arm/include/asm/fncpy.h
1717@@ -81,7 +81,9 @@
1718 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1719 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1720 \
1721+ pax_open_kernel(); \
1722 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1723+ pax_close_kernel(); \
1724 flush_icache_range((unsigned long)(dest_buf), \
1725 (unsigned long)(dest_buf) + (size)); \
1726 \
1727diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1728index 53e69da..3fdc896 100644
1729--- a/arch/arm/include/asm/futex.h
1730+++ b/arch/arm/include/asm/futex.h
1731@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1732 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1733 return -EFAULT;
1734
1735+ pax_open_userland();
1736+
1737 smp_mb();
1738 /* Prefetching cannot fault */
1739 prefetchw(uaddr);
1740@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1741 : "cc", "memory");
1742 smp_mb();
1743
1744+ pax_close_userland();
1745+
1746 *uval = val;
1747 return ret;
1748 }
1749@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1750 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1751 return -EFAULT;
1752
1753+ pax_open_userland();
1754+
1755 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1756 "1: " TUSER(ldr) " %1, [%4]\n"
1757 " teq %1, %2\n"
1758@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1759 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1760 : "cc", "memory");
1761
1762+ pax_close_userland();
1763+
1764 *uval = val;
1765 return ret;
1766 }
1767@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1768 return -EFAULT;
1769
1770 pagefault_disable(); /* implies preempt_disable() */
1771+ pax_open_userland();
1772
1773 switch (op) {
1774 case FUTEX_OP_SET:
1775@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1776 ret = -ENOSYS;
1777 }
1778
1779+ pax_close_userland();
1780 pagefault_enable(); /* subsumes preempt_enable() */
1781
1782 if (!ret) {
1783diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1784index 83eb2f7..ed77159 100644
1785--- a/arch/arm/include/asm/kmap_types.h
1786+++ b/arch/arm/include/asm/kmap_types.h
1787@@ -4,6 +4,6 @@
1788 /*
1789 * This is the "bare minimum". AIO seems to require this.
1790 */
1791-#define KM_TYPE_NR 16
1792+#define KM_TYPE_NR 17
1793
1794 #endif
1795diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1796index 9e614a1..3302cca 100644
1797--- a/arch/arm/include/asm/mach/dma.h
1798+++ b/arch/arm/include/asm/mach/dma.h
1799@@ -22,7 +22,7 @@ struct dma_ops {
1800 int (*residue)(unsigned int, dma_t *); /* optional */
1801 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1802 const char *type;
1803-};
1804+} __do_const;
1805
1806 struct dma_struct {
1807 void *addr; /* single DMA address */
1808diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1809index f98c7f3..e5c626d 100644
1810--- a/arch/arm/include/asm/mach/map.h
1811+++ b/arch/arm/include/asm/mach/map.h
1812@@ -23,17 +23,19 @@ struct map_desc {
1813
1814 /* types 0-3 are defined in asm/io.h */
1815 enum {
1816- MT_UNCACHED = 4,
1817- MT_CACHECLEAN,
1818- MT_MINICLEAN,
1819+ MT_UNCACHED_RW = 4,
1820+ MT_CACHECLEAN_RO,
1821+ MT_MINICLEAN_RO,
1822 MT_LOW_VECTORS,
1823 MT_HIGH_VECTORS,
1824- MT_MEMORY_RWX,
1825+ __MT_MEMORY_RWX,
1826 MT_MEMORY_RW,
1827- MT_ROM,
1828- MT_MEMORY_RWX_NONCACHED,
1829+ MT_MEMORY_RX,
1830+ MT_ROM_RX,
1831+ MT_MEMORY_RW_NONCACHED,
1832+ MT_MEMORY_RX_NONCACHED,
1833 MT_MEMORY_RW_DTCM,
1834- MT_MEMORY_RWX_ITCM,
1835+ MT_MEMORY_RX_ITCM,
1836 MT_MEMORY_RW_SO,
1837 MT_MEMORY_DMA_READY,
1838 };
1839diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1840index 563b92f..689d58e 100644
1841--- a/arch/arm/include/asm/outercache.h
1842+++ b/arch/arm/include/asm/outercache.h
1843@@ -39,7 +39,7 @@ struct outer_cache_fns {
1844 /* This is an ARM L2C thing */
1845 void (*write_sec)(unsigned long, unsigned);
1846 void (*configure)(const struct l2x0_regs *);
1847-};
1848+} __no_const;
1849
1850 extern struct outer_cache_fns outer_cache;
1851
1852diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1853index 4355f0e..cd9168e 100644
1854--- a/arch/arm/include/asm/page.h
1855+++ b/arch/arm/include/asm/page.h
1856@@ -23,6 +23,7 @@
1857
1858 #else
1859
1860+#include <linux/compiler.h>
1861 #include <asm/glue.h>
1862
1863 /*
1864@@ -114,7 +115,7 @@ struct cpu_user_fns {
1865 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1866 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1867 unsigned long vaddr, struct vm_area_struct *vma);
1868-};
1869+} __no_const;
1870
1871 #ifdef MULTI_USER
1872 extern struct cpu_user_fns cpu_user;
1873diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1874index 19cfab5..3f5c7e9 100644
1875--- a/arch/arm/include/asm/pgalloc.h
1876+++ b/arch/arm/include/asm/pgalloc.h
1877@@ -17,6 +17,7 @@
1878 #include <asm/processor.h>
1879 #include <asm/cacheflush.h>
1880 #include <asm/tlbflush.h>
1881+#include <asm/system_info.h>
1882
1883 #define check_pgt_cache() do { } while (0)
1884
1885@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1886 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1887 }
1888
1889+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1890+{
1891+ pud_populate(mm, pud, pmd);
1892+}
1893+
1894 #else /* !CONFIG_ARM_LPAE */
1895
1896 /*
1897@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1898 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1899 #define pmd_free(mm, pmd) do { } while (0)
1900 #define pud_populate(mm,pmd,pte) BUG()
1901+#define pud_populate_kernel(mm,pmd,pte) BUG()
1902
1903 #endif /* CONFIG_ARM_LPAE */
1904
1905@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1906 __free_page(pte);
1907 }
1908
1909+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1910+{
1911+#ifdef CONFIG_ARM_LPAE
1912+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1913+#else
1914+ if (addr & SECTION_SIZE)
1915+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1916+ else
1917+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1918+#endif
1919+ flush_pmd_entry(pmdp);
1920+}
1921+
1922 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1923 pmdval_t prot)
1924 {
1925diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1926index 5e68278..1869bae 100644
1927--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1928+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1929@@ -27,7 +27,7 @@
1930 /*
1931 * - section
1932 */
1933-#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1934+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1935 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1936 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1937 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1938@@ -39,6 +39,7 @@
1939 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1940 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1941 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1942+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1943
1944 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1945 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1946@@ -68,6 +69,7 @@
1947 * - extended small page/tiny page
1948 */
1949 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1950+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1951 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1952 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1953 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1954diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1955index bfd662e..f6cbb02 100644
1956--- a/arch/arm/include/asm/pgtable-2level.h
1957+++ b/arch/arm/include/asm/pgtable-2level.h
1958@@ -127,6 +127,9 @@
1959 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1960 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1961
1962+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1963+#define L_PTE_PXN (_AT(pteval_t, 0))
1964+
1965 /*
1966 * These are the memory types, defined to be compatible with
1967 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1968diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1969index a745a2a..481350a 100644
1970--- a/arch/arm/include/asm/pgtable-3level.h
1971+++ b/arch/arm/include/asm/pgtable-3level.h
1972@@ -80,6 +80,7 @@
1973 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
1974 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1975 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1976+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1977 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1978 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
1979 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
1980@@ -91,10 +92,12 @@
1981 #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
1982 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
1983 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
1984+#define PMD_SECT_RDONLY PMD_SECT_AP2
1985
1986 /*
1987 * To be used in assembly code with the upper page attributes.
1988 */
1989+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1990 #define L_PTE_XN_HIGH (1 << (54 - 32))
1991 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1992
1993diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1994index f403541..b10df68 100644
1995--- a/arch/arm/include/asm/pgtable.h
1996+++ b/arch/arm/include/asm/pgtable.h
1997@@ -33,6 +33,9 @@
1998 #include <asm/pgtable-2level.h>
1999 #endif
2000
2001+#define ktla_ktva(addr) (addr)
2002+#define ktva_ktla(addr) (addr)
2003+
2004 /*
2005 * Just any arbitrary offset to the start of the vmalloc VM area: the
2006 * current 8MB value just means that there will be a 8MB "hole" after the
2007@@ -48,6 +51,9 @@
2008 #define LIBRARY_TEXT_START 0x0c000000
2009
2010 #ifndef __ASSEMBLY__
2011+extern pteval_t __supported_pte_mask;
2012+extern pmdval_t __supported_pmd_mask;
2013+
2014 extern void __pte_error(const char *file, int line, pte_t);
2015 extern void __pmd_error(const char *file, int line, pmd_t);
2016 extern void __pgd_error(const char *file, int line, pgd_t);
2017@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2018 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2019 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2020
2021+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2022+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2023+
2024+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2025+#include <asm/domain.h>
2026+#include <linux/thread_info.h>
2027+#include <linux/preempt.h>
2028+
2029+static inline int test_domain(int domain, int domaintype)
2030+{
2031+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2032+}
2033+#endif
2034+
2035+#ifdef CONFIG_PAX_KERNEXEC
2036+static inline unsigned long pax_open_kernel(void) {
2037+#ifdef CONFIG_ARM_LPAE
2038+ /* TODO */
2039+#else
2040+ preempt_disable();
2041+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2042+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2043+#endif
2044+ return 0;
2045+}
2046+
2047+static inline unsigned long pax_close_kernel(void) {
2048+#ifdef CONFIG_ARM_LPAE
2049+ /* TODO */
2050+#else
2051+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2052+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2053+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2054+ preempt_enable_no_resched();
2055+#endif
2056+ return 0;
2057+}
2058+#else
2059+static inline unsigned long pax_open_kernel(void) { return 0; }
2060+static inline unsigned long pax_close_kernel(void) { return 0; }
2061+#endif
2062+
2063 /*
2064 * This is the lowest virtual address we can permit any user space
2065 * mapping to be mapped at. This is particularly important for
2066@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2067 /*
2068 * The pgprot_* and protection_map entries will be fixed up in runtime
2069 * to include the cachable and bufferable bits based on memory policy,
2070- * as well as any architecture dependent bits like global/ASID and SMP
2071- * shared mapping bits.
2072+ * as well as any architecture dependent bits like global/ASID, PXN,
2073+ * and SMP shared mapping bits.
2074 */
2075 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2076
2077@@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
2078 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2079 {
2080 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2081- L_PTE_NONE | L_PTE_VALID;
2082+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2083 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2084 return pte;
2085 }
2086diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2087index c25ef3e..735f14b 100644
2088--- a/arch/arm/include/asm/psci.h
2089+++ b/arch/arm/include/asm/psci.h
2090@@ -32,7 +32,7 @@ struct psci_operations {
2091 int (*affinity_info)(unsigned long target_affinity,
2092 unsigned long lowest_affinity_level);
2093 int (*migrate_info_type)(void);
2094-};
2095+} __no_const;
2096
2097 extern struct psci_operations psci_ops;
2098 extern struct smp_operations psci_smp_ops;
2099diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2100index 18f5a55..5072a40 100644
2101--- a/arch/arm/include/asm/smp.h
2102+++ b/arch/arm/include/asm/smp.h
2103@@ -107,7 +107,7 @@ struct smp_operations {
2104 int (*cpu_disable)(unsigned int cpu);
2105 #endif
2106 #endif
2107-};
2108+} __no_const;
2109
2110 struct of_cpu_method {
2111 const char *method;
2112diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2113index 72812a1..335f4f3 100644
2114--- a/arch/arm/include/asm/thread_info.h
2115+++ b/arch/arm/include/asm/thread_info.h
2116@@ -77,9 +77,9 @@ struct thread_info {
2117 .flags = 0, \
2118 .preempt_count = INIT_PREEMPT_COUNT, \
2119 .addr_limit = KERNEL_DS, \
2120- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2121- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2122- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2123+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2124+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2125+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2126 }
2127
2128 #define init_thread_info (init_thread_union.thread_info)
2129@@ -155,7 +155,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2130 #define TIF_SYSCALL_AUDIT 9
2131 #define TIF_SYSCALL_TRACEPOINT 10
2132 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2133-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2134+/* within 8 bits of TIF_SYSCALL_TRACE
2135+ * to meet flexible second operand requirements
2136+ */
2137+#define TIF_GRSEC_SETXID 12
2138+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2139 #define TIF_USING_IWMMXT 17
2140 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2141 #define TIF_RESTORE_SIGMASK 20
2142@@ -169,10 +173,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2143 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2144 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2145 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2146+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2147
2148 /* Checks for any syscall work in entry-common.S */
2149 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2150- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2151+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2152
2153 /*
2154 * Change these and you break ASM code in entry-common.S
2155diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2156index 5f833f7..76e6644 100644
2157--- a/arch/arm/include/asm/tls.h
2158+++ b/arch/arm/include/asm/tls.h
2159@@ -3,6 +3,7 @@
2160
2161 #include <linux/compiler.h>
2162 #include <asm/thread_info.h>
2163+#include <asm/pgtable.h>
2164
2165 #ifdef __ASSEMBLY__
2166 #include <asm/asm-offsets.h>
2167@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2168 * at 0xffff0fe0 must be used instead. (see
2169 * entry-armv.S for details)
2170 */
2171+ pax_open_kernel();
2172 *((unsigned int *)0xffff0ff0) = val;
2173+ pax_close_kernel();
2174 #endif
2175 }
2176
2177diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2178index ce0786e..a80c264 100644
2179--- a/arch/arm/include/asm/uaccess.h
2180+++ b/arch/arm/include/asm/uaccess.h
2181@@ -18,6 +18,7 @@
2182 #include <asm/domain.h>
2183 #include <asm/unified.h>
2184 #include <asm/compiler.h>
2185+#include <asm/pgtable.h>
2186
2187 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2188 #include <asm-generic/uaccess-unaligned.h>
2189@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2190 static inline void set_fs(mm_segment_t fs)
2191 {
2192 current_thread_info()->addr_limit = fs;
2193- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2194+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2195 }
2196
2197 #define segment_eq(a, b) ((a) == (b))
2198
2199+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2200+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2201+
2202+static inline void pax_open_userland(void)
2203+{
2204+
2205+#ifdef CONFIG_PAX_MEMORY_UDEREF
2206+ if (segment_eq(get_fs(), USER_DS)) {
2207+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2208+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2209+ }
2210+#endif
2211+
2212+}
2213+
2214+static inline void pax_close_userland(void)
2215+{
2216+
2217+#ifdef CONFIG_PAX_MEMORY_UDEREF
2218+ if (segment_eq(get_fs(), USER_DS)) {
2219+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2220+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2221+ }
2222+#endif
2223+
2224+}
2225+
2226 #define __addr_ok(addr) ({ \
2227 unsigned long flag; \
2228 __asm__("cmp %2, %0; movlo %0, #0" \
2229@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
2230
2231 #define get_user(x, p) \
2232 ({ \
2233+ int __e; \
2234 might_fault(); \
2235- __get_user_check(x, p); \
2236+ pax_open_userland(); \
2237+ __e = __get_user_check((x), (p)); \
2238+ pax_close_userland(); \
2239+ __e; \
2240 })
2241
2242 extern int __put_user_1(void *, unsigned int);
2243@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
2244
2245 #define put_user(x, p) \
2246 ({ \
2247+ int __e; \
2248 might_fault(); \
2249- __put_user_check(x, p); \
2250+ pax_open_userland(); \
2251+ __e = __put_user_check((x), (p)); \
2252+ pax_close_userland(); \
2253+ __e; \
2254 })
2255
2256 #else /* CONFIG_MMU */
2257@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
2258
2259 #endif /* CONFIG_MMU */
2260
2261+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
2262 #define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
2263
2264 #define user_addr_max() \
2265@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
2266 #define __get_user(x, ptr) \
2267 ({ \
2268 long __gu_err = 0; \
2269+ pax_open_userland(); \
2270 __get_user_err((x), (ptr), __gu_err); \
2271+ pax_close_userland(); \
2272 __gu_err; \
2273 })
2274
2275 #define __get_user_error(x, ptr, err) \
2276 ({ \
2277+ pax_open_userland(); \
2278 __get_user_err((x), (ptr), err); \
2279+ pax_close_userland(); \
2280 (void) 0; \
2281 })
2282
2283@@ -368,13 +409,17 @@ do { \
2284 #define __put_user(x, ptr) \
2285 ({ \
2286 long __pu_err = 0; \
2287+ pax_open_userland(); \
2288 __put_user_err((x), (ptr), __pu_err); \
2289+ pax_close_userland(); \
2290 __pu_err; \
2291 })
2292
2293 #define __put_user_error(x, ptr, err) \
2294 ({ \
2295+ pax_open_userland(); \
2296 __put_user_err((x), (ptr), err); \
2297+ pax_close_userland(); \
2298 (void) 0; \
2299 })
2300
2301@@ -474,11 +519,44 @@ do { \
2302
2303
2304 #ifdef CONFIG_MMU
2305-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2306-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2307+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2308+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2309+
2310+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2311+{
2312+ unsigned long ret;
2313+
2314+ check_object_size(to, n, false);
2315+ pax_open_userland();
2316+ ret = ___copy_from_user(to, from, n);
2317+ pax_close_userland();
2318+ return ret;
2319+}
2320+
2321+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2322+{
2323+ unsigned long ret;
2324+
2325+ check_object_size(from, n, true);
2326+ pax_open_userland();
2327+ ret = ___copy_to_user(to, from, n);
2328+ pax_close_userland();
2329+ return ret;
2330+}
2331+
2332 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2333-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2334+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2335 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2336+
2337+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2338+{
2339+ unsigned long ret;
2340+ pax_open_userland();
2341+ ret = ___clear_user(addr, n);
2342+ pax_close_userland();
2343+ return ret;
2344+}
2345+
2346 #else
2347 #define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
2348 #define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
2349@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2350
2351 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2352 {
2353+ if ((long)n < 0)
2354+ return n;
2355+
2356 if (access_ok(VERIFY_READ, from, n))
2357 n = __copy_from_user(to, from, n);
2358 else /* security hole - plug it */
2359@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2360
2361 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2362 {
2363+ if ((long)n < 0)
2364+ return n;
2365+
2366 if (access_ok(VERIFY_WRITE, to, n))
2367 n = __copy_to_user(to, from, n);
2368 return n;
2369diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2370index 5af0ed1..cea83883 100644
2371--- a/arch/arm/include/uapi/asm/ptrace.h
2372+++ b/arch/arm/include/uapi/asm/ptrace.h
2373@@ -92,7 +92,7 @@
2374 * ARMv7 groups of PSR bits
2375 */
2376 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2377-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2378+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2379 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2380 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2381
2382diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2383index a88671c..1cc895e 100644
2384--- a/arch/arm/kernel/armksyms.c
2385+++ b/arch/arm/kernel/armksyms.c
2386@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2387
2388 /* networking */
2389 EXPORT_SYMBOL(csum_partial);
2390-EXPORT_SYMBOL(csum_partial_copy_from_user);
2391+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2392 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2393 EXPORT_SYMBOL(__csum_ipv6_magic);
2394
2395@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2396 #ifdef CONFIG_MMU
2397 EXPORT_SYMBOL(copy_page);
2398
2399-EXPORT_SYMBOL(__copy_from_user);
2400-EXPORT_SYMBOL(__copy_to_user);
2401-EXPORT_SYMBOL(__clear_user);
2402+EXPORT_SYMBOL(___copy_from_user);
2403+EXPORT_SYMBOL(___copy_to_user);
2404+EXPORT_SYMBOL(___clear_user);
2405
2406 EXPORT_SYMBOL(__get_user_1);
2407 EXPORT_SYMBOL(__get_user_2);
2408diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2409index 672b219..4aa120a 100644
2410--- a/arch/arm/kernel/entry-armv.S
2411+++ b/arch/arm/kernel/entry-armv.S
2412@@ -48,6 +48,87 @@
2413 9997:
2414 .endm
2415
2416+ .macro pax_enter_kernel
2417+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2418+ @ make aligned space for saved DACR
2419+ sub sp, sp, #8
2420+ @ save regs
2421+ stmdb sp!, {r1, r2}
2422+ @ read DACR from cpu_domain into r1
2423+ mov r2, sp
2424+ @ assume 8K pages, since we have to split the immediate in two
2425+ bic r2, r2, #(0x1fc0)
2426+ bic r2, r2, #(0x3f)
2427+ ldr r1, [r2, #TI_CPU_DOMAIN]
2428+ @ store old DACR on stack
2429+ str r1, [sp, #8]
2430+#ifdef CONFIG_PAX_KERNEXEC
2431+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2432+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2433+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2434+#endif
2435+#ifdef CONFIG_PAX_MEMORY_UDEREF
2436+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2437+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2438+#endif
2439+ @ write r1 to current_thread_info()->cpu_domain
2440+ str r1, [r2, #TI_CPU_DOMAIN]
2441+ @ write r1 to DACR
2442+ mcr p15, 0, r1, c3, c0, 0
2443+ @ instruction sync
2444+ instr_sync
2445+ @ restore regs
2446+ ldmia sp!, {r1, r2}
2447+#endif
2448+ .endm
2449+
2450+ .macro pax_open_userland
2451+#ifdef CONFIG_PAX_MEMORY_UDEREF
2452+ @ save regs
2453+ stmdb sp!, {r0, r1}
2454+ @ read DACR from cpu_domain into r1
2455+ mov r0, sp
2456+ @ assume 8K pages, since we have to split the immediate in two
2457+ bic r0, r0, #(0x1fc0)
2458+ bic r0, r0, #(0x3f)
2459+ ldr r1, [r0, #TI_CPU_DOMAIN]
2460+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2461+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2462+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2463+ @ write r1 to current_thread_info()->cpu_domain
2464+ str r1, [r0, #TI_CPU_DOMAIN]
2465+ @ write r1 to DACR
2466+ mcr p15, 0, r1, c3, c0, 0
2467+ @ instruction sync
2468+ instr_sync
2469+ @ restore regs
2470+ ldmia sp!, {r0, r1}
2471+#endif
2472+ .endm
2473+
2474+ .macro pax_close_userland
2475+#ifdef CONFIG_PAX_MEMORY_UDEREF
2476+ @ save regs
2477+ stmdb sp!, {r0, r1}
2478+ @ read DACR from cpu_domain into r1
2479+ mov r0, sp
2480+ @ assume 8K pages, since we have to split the immediate in two
2481+ bic r0, r0, #(0x1fc0)
2482+ bic r0, r0, #(0x3f)
2483+ ldr r1, [r0, #TI_CPU_DOMAIN]
2484+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2485+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2486+ @ write r1 to current_thread_info()->cpu_domain
2487+ str r1, [r0, #TI_CPU_DOMAIN]
2488+ @ write r1 to DACR
2489+ mcr p15, 0, r1, c3, c0, 0
2490+ @ instruction sync
2491+ instr_sync
2492+ @ restore regs
2493+ ldmia sp!, {r0, r1}
2494+#endif
2495+ .endm
2496+
2497 .macro pabt_helper
2498 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2499 #ifdef MULTI_PABORT
2500@@ -90,11 +171,15 @@
2501 * Invalid mode handlers
2502 */
2503 .macro inv_entry, reason
2504+
2505+ pax_enter_kernel
2506+
2507 sub sp, sp, #S_FRAME_SIZE
2508 ARM( stmib sp, {r1 - lr} )
2509 THUMB( stmia sp, {r0 - r12} )
2510 THUMB( str sp, [sp, #S_SP] )
2511 THUMB( str lr, [sp, #S_LR] )
2512+
2513 mov r1, #\reason
2514 .endm
2515
2516@@ -150,7 +235,11 @@ ENDPROC(__und_invalid)
2517 .macro svc_entry, stack_hole=0, trace=1
2518 UNWIND(.fnstart )
2519 UNWIND(.save {r0 - pc} )
2520+
2521+ pax_enter_kernel
2522+
2523 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2524+
2525 #ifdef CONFIG_THUMB2_KERNEL
2526 SPFIX( str r0, [sp] ) @ temporarily saved
2527 SPFIX( mov r0, sp )
2528@@ -165,7 +254,12 @@ ENDPROC(__und_invalid)
2529 ldmia r0, {r3 - r5}
2530 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2531 mov r6, #-1 @ "" "" "" ""
2532+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2533+ @ offset sp by 8 as done in pax_enter_kernel
2534+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2535+#else
2536 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2537+#endif
2538 SPFIX( addeq r2, r2, #4 )
2539 str r3, [sp, #-4]! @ save the "real" r0 copied
2540 @ from the exception stack
2541@@ -369,6 +463,9 @@ ENDPROC(__fiq_abt)
2542 .macro usr_entry, trace=1
2543 UNWIND(.fnstart )
2544 UNWIND(.cantunwind ) @ don't unwind the user space
2545+
2546+ pax_enter_kernel_user
2547+
2548 sub sp, sp, #S_FRAME_SIZE
2549 ARM( stmib sp, {r1 - r12} )
2550 THUMB( stmia sp, {r0 - r12} )
2551@@ -479,7 +576,9 @@ __und_usr:
2552 tst r3, #PSR_T_BIT @ Thumb mode?
2553 bne __und_usr_thumb
2554 sub r4, r2, #4 @ ARM instr at LR - 4
2555+ pax_open_userland
2556 1: ldrt r0, [r4]
2557+ pax_close_userland
2558 ARM_BE8(rev r0, r0) @ little endian instruction
2559
2560 @ r0 = 32-bit ARM instruction which caused the exception
2561@@ -513,11 +612,15 @@ __und_usr_thumb:
2562 */
2563 .arch armv6t2
2564 #endif
2565+ pax_open_userland
2566 2: ldrht r5, [r4]
2567+ pax_close_userland
2568 ARM_BE8(rev16 r5, r5) @ little endian instruction
2569 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2570 blo __und_usr_fault_16 @ 16bit undefined instruction
2571+ pax_open_userland
2572 3: ldrht r0, [r2]
2573+ pax_close_userland
2574 ARM_BE8(rev16 r0, r0) @ little endian instruction
2575 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2576 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2577@@ -547,7 +650,8 @@ ENDPROC(__und_usr)
2578 */
2579 .pushsection .fixup, "ax"
2580 .align 2
2581-4: str r4, [sp, #S_PC] @ retry current instruction
2582+4: pax_close_userland
2583+ str r4, [sp, #S_PC] @ retry current instruction
2584 ret r9
2585 .popsection
2586 .pushsection __ex_table,"a"
2587@@ -767,7 +871,7 @@ ENTRY(__switch_to)
2588 THUMB( str lr, [ip], #4 )
2589 ldr r4, [r2, #TI_TP_VALUE]
2590 ldr r5, [r2, #TI_TP_VALUE + 4]
2591-#ifdef CONFIG_CPU_USE_DOMAINS
2592+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2593 ldr r6, [r2, #TI_CPU_DOMAIN]
2594 #endif
2595 switch_tls r1, r4, r5, r3, r7
2596@@ -776,7 +880,7 @@ ENTRY(__switch_to)
2597 ldr r8, =__stack_chk_guard
2598 ldr r7, [r7, #TSK_STACK_CANARY]
2599 #endif
2600-#ifdef CONFIG_CPU_USE_DOMAINS
2601+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2602 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2603 #endif
2604 mov r5, r0
2605diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2606index 4e7f40c..0f9ee2c 100644
2607--- a/arch/arm/kernel/entry-common.S
2608+++ b/arch/arm/kernel/entry-common.S
2609@@ -11,18 +11,46 @@
2610 #include <asm/assembler.h>
2611 #include <asm/unistd.h>
2612 #include <asm/ftrace.h>
2613+#include <asm/domain.h>
2614 #include <asm/unwind.h>
2615
2616+#include "entry-header.S"
2617+
2618 #ifdef CONFIG_NEED_RET_TO_USER
2619 #include <mach/entry-macro.S>
2620 #else
2621 .macro arch_ret_to_user, tmp1, tmp2
2622+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2623+ @ save regs
2624+ stmdb sp!, {r1, r2}
2625+ @ read DACR from cpu_domain into r1
2626+ mov r2, sp
2627+ @ assume 8K pages, since we have to split the immediate in two
2628+ bic r2, r2, #(0x1fc0)
2629+ bic r2, r2, #(0x3f)
2630+ ldr r1, [r2, #TI_CPU_DOMAIN]
2631+#ifdef CONFIG_PAX_KERNEXEC
2632+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2633+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2634+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2635+#endif
2636+#ifdef CONFIG_PAX_MEMORY_UDEREF
2637+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2638+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2639+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2640+#endif
2641+ @ write r1 to current_thread_info()->cpu_domain
2642+ str r1, [r2, #TI_CPU_DOMAIN]
2643+ @ write r1 to DACR
2644+ mcr p15, 0, r1, c3, c0, 0
2645+ @ instruction sync
2646+ instr_sync
2647+ @ restore regs
2648+ ldmia sp!, {r1, r2}
2649+#endif
2650 .endm
2651 #endif
2652
2653-#include "entry-header.S"
2654-
2655-
2656 .align 5
2657 /*
2658 * This is the fast syscall return path. We do as little as
2659@@ -173,6 +201,12 @@ ENTRY(vector_swi)
2660 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2661 #endif
2662
2663+ /*
2664+ * do this here to avoid a performance hit of wrapping the code above
2665+ * that directly dereferences userland to parse the SWI instruction
2666+ */
2667+ pax_enter_kernel_user
2668+
2669 adr tbl, sys_call_table @ load syscall table pointer
2670
2671 #if defined(CONFIG_OABI_COMPAT)
2672diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2673index 1a0045a..9b4f34d 100644
2674--- a/arch/arm/kernel/entry-header.S
2675+++ b/arch/arm/kernel/entry-header.S
2676@@ -196,6 +196,60 @@
2677 msr cpsr_c, \rtemp @ switch back to the SVC mode
2678 .endm
2679
2680+ .macro pax_enter_kernel_user
2681+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2682+ @ save regs
2683+ stmdb sp!, {r0, r1}
2684+ @ read DACR from cpu_domain into r1
2685+ mov r0, sp
2686+ @ assume 8K pages, since we have to split the immediate in two
2687+ bic r0, r0, #(0x1fc0)
2688+ bic r0, r0, #(0x3f)
2689+ ldr r1, [r0, #TI_CPU_DOMAIN]
2690+#ifdef CONFIG_PAX_MEMORY_UDEREF
2691+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2692+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2693+#endif
2694+#ifdef CONFIG_PAX_KERNEXEC
2695+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2696+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2697+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2698+#endif
2699+ @ write r1 to current_thread_info()->cpu_domain
2700+ str r1, [r0, #TI_CPU_DOMAIN]
2701+ @ write r1 to DACR
2702+ mcr p15, 0, r1, c3, c0, 0
2703+ @ instruction sync
2704+ instr_sync
2705+ @ restore regs
2706+ ldmia sp!, {r0, r1}
2707+#endif
2708+ .endm
2709+
2710+ .macro pax_exit_kernel
2711+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2712+ @ save regs
2713+ stmdb sp!, {r0, r1}
2714+ @ read old DACR from stack into r1
2715+ ldr r1, [sp, #(8 + S_SP)]
2716+ sub r1, r1, #8
2717+ ldr r1, [r1]
2718+
2719+ @ write r1 to current_thread_info()->cpu_domain
2720+ mov r0, sp
2721+ @ assume 8K pages, since we have to split the immediate in two
2722+ bic r0, r0, #(0x1fc0)
2723+ bic r0, r0, #(0x3f)
2724+ str r1, [r0, #TI_CPU_DOMAIN]
2725+ @ write r1 to DACR
2726+ mcr p15, 0, r1, c3, c0, 0
2727+ @ instruction sync
2728+ instr_sync
2729+ @ restore regs
2730+ ldmia sp!, {r0, r1}
2731+#endif
2732+ .endm
2733+
2734 #ifndef CONFIG_THUMB2_KERNEL
2735 .macro svc_exit, rpsr, irq = 0
2736 .if \irq != 0
2737@@ -215,6 +269,9 @@
2738 blne trace_hardirqs_off
2739 #endif
2740 .endif
2741+
2742+ pax_exit_kernel
2743+
2744 msr spsr_cxsf, \rpsr
2745 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
2746 @ We must avoid clrex due to Cortex-A15 erratum #830321
2747@@ -291,6 +348,9 @@
2748 blne trace_hardirqs_off
2749 #endif
2750 .endif
2751+
2752+ pax_exit_kernel
2753+
2754 ldr lr, [sp, #S_SP] @ top of the stack
2755 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2756
2757diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2758index 059c3da..8e45cfc 100644
2759--- a/arch/arm/kernel/fiq.c
2760+++ b/arch/arm/kernel/fiq.c
2761@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
2762 void *base = vectors_page;
2763 unsigned offset = FIQ_OFFSET;
2764
2765+ pax_open_kernel();
2766 memcpy(base + offset, start, length);
2767+ pax_close_kernel();
2768+
2769 if (!cache_is_vipt_nonaliasing())
2770 flush_icache_range((unsigned long)base + offset, offset +
2771 length);
2772diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2773index 0196327..50ac8895 100644
2774--- a/arch/arm/kernel/head.S
2775+++ b/arch/arm/kernel/head.S
2776@@ -444,7 +444,7 @@ __enable_mmu:
2777 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2778 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2779 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2780- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2781+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2782 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2783 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2784 #endif
2785diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2786index 2e11961..07f0704 100644
2787--- a/arch/arm/kernel/module.c
2788+++ b/arch/arm/kernel/module.c
2789@@ -38,12 +38,39 @@
2790 #endif
2791
2792 #ifdef CONFIG_MMU
2793-void *module_alloc(unsigned long size)
2794+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2795 {
2796+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2797+ return NULL;
2798 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2799- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
2800+ GFP_KERNEL, prot, 0, NUMA_NO_NODE,
2801 __builtin_return_address(0));
2802 }
2803+
2804+void *module_alloc(unsigned long size)
2805+{
2806+
2807+#ifdef CONFIG_PAX_KERNEXEC
2808+ return __module_alloc(size, PAGE_KERNEL);
2809+#else
2810+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2811+#endif
2812+
2813+}
2814+
2815+#ifdef CONFIG_PAX_KERNEXEC
2816+void module_memfree_exec(void *module_region)
2817+{
2818+ module_memfree(module_region);
2819+}
2820+EXPORT_SYMBOL(module_memfree_exec);
2821+
2822+void *module_alloc_exec(unsigned long size)
2823+{
2824+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2825+}
2826+EXPORT_SYMBOL(module_alloc_exec);
2827+#endif
2828 #endif
2829
2830 int
2831diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2832index 69bda1a..755113a 100644
2833--- a/arch/arm/kernel/patch.c
2834+++ b/arch/arm/kernel/patch.c
2835@@ -66,6 +66,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2836 else
2837 __acquire(&patch_lock);
2838
2839+ pax_open_kernel();
2840 if (thumb2 && __opcode_is_thumb16(insn)) {
2841 *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
2842 size = sizeof(u16);
2843@@ -97,6 +98,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2844 *(u32 *)waddr = insn;
2845 size = sizeof(u32);
2846 }
2847+ pax_close_kernel();
2848
2849 if (waddr != addr) {
2850 flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
2851diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2852index 2bf1a16..d959d40 100644
2853--- a/arch/arm/kernel/process.c
2854+++ b/arch/arm/kernel/process.c
2855@@ -213,6 +213,7 @@ void machine_power_off(void)
2856
2857 if (pm_power_off)
2858 pm_power_off();
2859+ BUG();
2860 }
2861
2862 /*
2863@@ -226,7 +227,7 @@ void machine_power_off(void)
2864 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2865 * to use. Implementing such co-ordination would be essentially impossible.
2866 */
2867-void machine_restart(char *cmd)
2868+__noreturn void machine_restart(char *cmd)
2869 {
2870 local_irq_disable();
2871 smp_send_stop();
2872@@ -252,8 +253,8 @@ void __show_regs(struct pt_regs *regs)
2873
2874 show_regs_print_info(KERN_DEFAULT);
2875
2876- print_symbol("PC is at %s\n", instruction_pointer(regs));
2877- print_symbol("LR is at %s\n", regs->ARM_lr);
2878+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2879+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2880 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2881 "sp : %08lx ip : %08lx fp : %08lx\n",
2882 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2883@@ -430,12 +431,6 @@ unsigned long get_wchan(struct task_struct *p)
2884 return 0;
2885 }
2886
2887-unsigned long arch_randomize_brk(struct mm_struct *mm)
2888-{
2889- unsigned long range_end = mm->brk + 0x02000000;
2890- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2891-}
2892-
2893 #ifdef CONFIG_MMU
2894 #ifdef CONFIG_KUSER_HELPERS
2895 /*
2896@@ -451,7 +446,7 @@ static struct vm_area_struct gate_vma = {
2897
2898 static int __init gate_vma_init(void)
2899 {
2900- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2901+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2902 return 0;
2903 }
2904 arch_initcall(gate_vma_init);
2905@@ -480,81 +475,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
2906 return is_gate_vma(vma) ? "[vectors]" : NULL;
2907 }
2908
2909-/* If possible, provide a placement hint at a random offset from the
2910- * stack for the signal page.
2911- */
2912-static unsigned long sigpage_addr(const struct mm_struct *mm,
2913- unsigned int npages)
2914-{
2915- unsigned long offset;
2916- unsigned long first;
2917- unsigned long last;
2918- unsigned long addr;
2919- unsigned int slots;
2920-
2921- first = PAGE_ALIGN(mm->start_stack);
2922-
2923- last = TASK_SIZE - (npages << PAGE_SHIFT);
2924-
2925- /* No room after stack? */
2926- if (first > last)
2927- return 0;
2928-
2929- /* Just enough room? */
2930- if (first == last)
2931- return first;
2932-
2933- slots = ((last - first) >> PAGE_SHIFT) + 1;
2934-
2935- offset = get_random_int() % slots;
2936-
2937- addr = first + (offset << PAGE_SHIFT);
2938-
2939- return addr;
2940-}
2941-
2942-static struct page *signal_page;
2943-extern struct page *get_signal_page(void);
2944-
2945-static const struct vm_special_mapping sigpage_mapping = {
2946- .name = "[sigpage]",
2947- .pages = &signal_page,
2948-};
2949-
2950 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2951 {
2952 struct mm_struct *mm = current->mm;
2953- struct vm_area_struct *vma;
2954- unsigned long addr;
2955- unsigned long hint;
2956- int ret = 0;
2957-
2958- if (!signal_page)
2959- signal_page = get_signal_page();
2960- if (!signal_page)
2961- return -ENOMEM;
2962
2963 down_write(&mm->mmap_sem);
2964- hint = sigpage_addr(mm, 1);
2965- addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0);
2966- if (IS_ERR_VALUE(addr)) {
2967- ret = addr;
2968- goto up_fail;
2969- }
2970-
2971- vma = _install_special_mapping(mm, addr, PAGE_SIZE,
2972- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2973- &sigpage_mapping);
2974-
2975- if (IS_ERR(vma)) {
2976- ret = PTR_ERR(vma);
2977- goto up_fail;
2978- }
2979-
2980- mm->context.sigpage = addr;
2981-
2982- up_fail:
2983+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2984 up_write(&mm->mmap_sem);
2985- return ret;
2986+ return 0;
2987 }
2988 #endif
2989diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2990index f90fdf4..24e8c84 100644
2991--- a/arch/arm/kernel/psci.c
2992+++ b/arch/arm/kernel/psci.c
2993@@ -26,7 +26,7 @@
2994 #include <asm/psci.h>
2995 #include <asm/system_misc.h>
2996
2997-struct psci_operations psci_ops;
2998+struct psci_operations psci_ops __read_only;
2999
3000 static int (*invoke_psci_fn)(u32, u32, u32, u32);
3001 typedef int (*psci_initcall_t)(const struct device_node *);
3002diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3003index ef9119f..31995a3 100644
3004--- a/arch/arm/kernel/ptrace.c
3005+++ b/arch/arm/kernel/ptrace.c
3006@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3007 regs->ARM_ip = ip;
3008 }
3009
3010+#ifdef CONFIG_GRKERNSEC_SETXID
3011+extern void gr_delayed_cred_worker(void);
3012+#endif
3013+
3014 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3015 {
3016 current_thread_info()->syscall = scno;
3017
3018+#ifdef CONFIG_GRKERNSEC_SETXID
3019+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3020+ gr_delayed_cred_worker();
3021+#endif
3022+
3023 /* Do the secure computing check first; failures should be fast. */
3024 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
3025 if (secure_computing() == -1)
3026diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3027index 1d60beb..4aa25d5 100644
3028--- a/arch/arm/kernel/setup.c
3029+++ b/arch/arm/kernel/setup.c
3030@@ -105,21 +105,23 @@ EXPORT_SYMBOL(elf_hwcap);
3031 unsigned int elf_hwcap2 __read_mostly;
3032 EXPORT_SYMBOL(elf_hwcap2);
3033
3034+pteval_t __supported_pte_mask __read_only;
3035+pmdval_t __supported_pmd_mask __read_only;
3036
3037 #ifdef MULTI_CPU
3038-struct processor processor __read_mostly;
3039+struct processor processor __read_only;
3040 #endif
3041 #ifdef MULTI_TLB
3042-struct cpu_tlb_fns cpu_tlb __read_mostly;
3043+struct cpu_tlb_fns cpu_tlb __read_only;
3044 #endif
3045 #ifdef MULTI_USER
3046-struct cpu_user_fns cpu_user __read_mostly;
3047+struct cpu_user_fns cpu_user __read_only;
3048 #endif
3049 #ifdef MULTI_CACHE
3050-struct cpu_cache_fns cpu_cache __read_mostly;
3051+struct cpu_cache_fns cpu_cache __read_only;
3052 #endif
3053 #ifdef CONFIG_OUTER_CACHE
3054-struct outer_cache_fns outer_cache __read_mostly;
3055+struct outer_cache_fns outer_cache __read_only;
3056 EXPORT_SYMBOL(outer_cache);
3057 #endif
3058
3059@@ -250,9 +252,13 @@ static int __get_cpu_architecture(void)
3060 * Register 0 and check for VMSAv7 or PMSAv7 */
3061 unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
3062 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3063- (mmfr0 & 0x000000f0) >= 0x00000030)
3064+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3065 cpu_arch = CPU_ARCH_ARMv7;
3066- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3067+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3068+ __supported_pte_mask |= L_PTE_PXN;
3069+ __supported_pmd_mask |= PMD_PXNTABLE;
3070+ }
3071+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3072 (mmfr0 & 0x000000f0) == 0x00000020)
3073 cpu_arch = CPU_ARCH_ARMv6;
3074 else
3075diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3076index 023ac90..0a69950 100644
3077--- a/arch/arm/kernel/signal.c
3078+++ b/arch/arm/kernel/signal.c
3079@@ -24,8 +24,6 @@
3080
3081 extern const unsigned long sigreturn_codes[7];
3082
3083-static unsigned long signal_return_offset;
3084-
3085 #ifdef CONFIG_CRUNCH
3086 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3087 {
3088@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3089 * except when the MPU has protected the vectors
3090 * page from PL0
3091 */
3092- retcode = mm->context.sigpage + signal_return_offset +
3093- (idx << 2) + thumb;
3094+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3095 } else
3096 #endif
3097 {
3098@@ -603,33 +600,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3099 } while (thread_flags & _TIF_WORK_MASK);
3100 return 0;
3101 }
3102-
3103-struct page *get_signal_page(void)
3104-{
3105- unsigned long ptr;
3106- unsigned offset;
3107- struct page *page;
3108- void *addr;
3109-
3110- page = alloc_pages(GFP_KERNEL, 0);
3111-
3112- if (!page)
3113- return NULL;
3114-
3115- addr = page_address(page);
3116-
3117- /* Give the signal return code some randomness */
3118- offset = 0x200 + (get_random_int() & 0x7fc);
3119- signal_return_offset = offset;
3120-
3121- /*
3122- * Copy signal return handlers into the vector page, and
3123- * set sigreturn to be a pointer to these.
3124- */
3125- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3126-
3127- ptr = (unsigned long)addr + offset;
3128- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3129-
3130- return page;
3131-}
3132diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3133index 86ef244..c518451 100644
3134--- a/arch/arm/kernel/smp.c
3135+++ b/arch/arm/kernel/smp.c
3136@@ -76,7 +76,7 @@ enum ipi_msg_type {
3137
3138 static DECLARE_COMPLETION(cpu_running);
3139
3140-static struct smp_operations smp_ops;
3141+static struct smp_operations smp_ops __read_only;
3142
3143 void __init smp_set_ops(struct smp_operations *ops)
3144 {
3145diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3146index 7a3be1d..b00c7de 100644
3147--- a/arch/arm/kernel/tcm.c
3148+++ b/arch/arm/kernel/tcm.c
3149@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3150 .virtual = ITCM_OFFSET,
3151 .pfn = __phys_to_pfn(ITCM_OFFSET),
3152 .length = 0,
3153- .type = MT_MEMORY_RWX_ITCM,
3154+ .type = MT_MEMORY_RX_ITCM,
3155 }
3156 };
3157
3158@@ -267,7 +267,9 @@ no_dtcm:
3159 start = &__sitcm_text;
3160 end = &__eitcm_text;
3161 ram = &__itcm_start;
3162+ pax_open_kernel();
3163 memcpy(start, ram, itcm_code_sz);
3164+ pax_close_kernel();
3165 pr_debug("CPU ITCM: copied code from %p - %p\n",
3166 start, end);
3167 itcm_present = true;
3168diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3169index 788e23f..6fa06a1 100644
3170--- a/arch/arm/kernel/traps.c
3171+++ b/arch/arm/kernel/traps.c
3172@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3173 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3174 {
3175 #ifdef CONFIG_KALLSYMS
3176- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3177+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3178 #else
3179 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3180 #endif
3181@@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3182 static int die_owner = -1;
3183 static unsigned int die_nest_count;
3184
3185+extern void gr_handle_kernel_exploit(void);
3186+
3187 static unsigned long oops_begin(void)
3188 {
3189 int cpu;
3190@@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3191 panic("Fatal exception in interrupt");
3192 if (panic_on_oops)
3193 panic("Fatal exception");
3194+
3195+ gr_handle_kernel_exploit();
3196+
3197 if (signr)
3198 do_exit(signr);
3199 }
3200@@ -880,7 +885,11 @@ void __init early_trap_init(void *vectors_base)
3201 kuser_init(vectors_base);
3202
3203 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3204- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3205+
3206+#ifndef CONFIG_PAX_MEMORY_UDEREF
3207+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3208+#endif
3209+
3210 #else /* ifndef CONFIG_CPU_V7M */
3211 /*
3212 * on V7-M there is no need to copy the vector table to a dedicated
3213diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3214index b31aa73..cc4b7a1 100644
3215--- a/arch/arm/kernel/vmlinux.lds.S
3216+++ b/arch/arm/kernel/vmlinux.lds.S
3217@@ -37,7 +37,7 @@
3218 #endif
3219
3220 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3221- defined(CONFIG_GENERIC_BUG)
3222+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3223 #define ARM_EXIT_KEEP(x) x
3224 #define ARM_EXIT_DISCARD(x)
3225 #else
3226@@ -123,6 +123,8 @@ SECTIONS
3227 #ifdef CONFIG_DEBUG_RODATA
3228 . = ALIGN(1<<SECTION_SHIFT);
3229 #endif
3230+ _etext = .; /* End of text section */
3231+
3232 RO_DATA(PAGE_SIZE)
3233
3234 . = ALIGN(4);
3235@@ -153,8 +155,6 @@ SECTIONS
3236
3237 NOTES
3238
3239- _etext = .; /* End of text and rodata section */
3240-
3241 #ifndef CONFIG_XIP_KERNEL
3242 # ifdef CONFIG_ARM_KERNMEM_PERMS
3243 . = ALIGN(1<<SECTION_SHIFT);
3244diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3245index b652af5..60231ab 100644
3246--- a/arch/arm/kvm/arm.c
3247+++ b/arch/arm/kvm/arm.c
3248@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3249 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3250
3251 /* The VMID used in the VTTBR */
3252-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3253+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3254 static u8 kvm_next_vmid;
3255 static DEFINE_SPINLOCK(kvm_vmid_lock);
3256
3257@@ -358,7 +358,7 @@ void force_vm_exit(const cpumask_t *mask)
3258 */
3259 static bool need_new_vmid_gen(struct kvm *kvm)
3260 {
3261- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3262+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3263 }
3264
3265 /**
3266@@ -391,7 +391,7 @@ static void update_vttbr(struct kvm *kvm)
3267
3268 /* First user of a new VMID generation? */
3269 if (unlikely(kvm_next_vmid == 0)) {
3270- atomic64_inc(&kvm_vmid_gen);
3271+ atomic64_inc_unchecked(&kvm_vmid_gen);
3272 kvm_next_vmid = 1;
3273
3274 /*
3275@@ -408,7 +408,7 @@ static void update_vttbr(struct kvm *kvm)
3276 kvm_call_hyp(__kvm_flush_vm_context);
3277 }
3278
3279- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3280+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3281 kvm->arch.vmid = kvm_next_vmid;
3282 kvm_next_vmid++;
3283
3284@@ -1087,7 +1087,7 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
3285 /**
3286 * Initialize Hyp-mode and memory mappings on all CPUs.
3287 */
3288-int kvm_arch_init(void *opaque)
3289+int kvm_arch_init(const void *opaque)
3290 {
3291 int err;
3292 int ret, cpu;
3293diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3294index 14a0d98..7771a7d 100644
3295--- a/arch/arm/lib/clear_user.S
3296+++ b/arch/arm/lib/clear_user.S
3297@@ -12,14 +12,14 @@
3298
3299 .text
3300
3301-/* Prototype: int __clear_user(void *addr, size_t sz)
3302+/* Prototype: int ___clear_user(void *addr, size_t sz)
3303 * Purpose : clear some user memory
3304 * Params : addr - user memory address to clear
3305 * : sz - number of bytes to clear
3306 * Returns : number of bytes NOT cleared
3307 */
3308 ENTRY(__clear_user_std)
3309-WEAK(__clear_user)
3310+WEAK(___clear_user)
3311 stmfd sp!, {r1, lr}
3312 mov r2, #0
3313 cmp r1, #4
3314@@ -44,7 +44,7 @@ WEAK(__clear_user)
3315 USER( strnebt r2, [r0])
3316 mov r0, #0
3317 ldmfd sp!, {r1, pc}
3318-ENDPROC(__clear_user)
3319+ENDPROC(___clear_user)
3320 ENDPROC(__clear_user_std)
3321
3322 .pushsection .fixup,"ax"
3323diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3324index 7a235b9..73a0556 100644
3325--- a/arch/arm/lib/copy_from_user.S
3326+++ b/arch/arm/lib/copy_from_user.S
3327@@ -17,7 +17,7 @@
3328 /*
3329 * Prototype:
3330 *
3331- * size_t __copy_from_user(void *to, const void *from, size_t n)
3332+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3333 *
3334 * Purpose:
3335 *
3336@@ -89,11 +89,11 @@
3337
3338 .text
3339
3340-ENTRY(__copy_from_user)
3341+ENTRY(___copy_from_user)
3342
3343 #include "copy_template.S"
3344
3345-ENDPROC(__copy_from_user)
3346+ENDPROC(___copy_from_user)
3347
3348 .pushsection .fixup,"ax"
3349 .align 0
3350diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3351index 6ee2f67..d1cce76 100644
3352--- a/arch/arm/lib/copy_page.S
3353+++ b/arch/arm/lib/copy_page.S
3354@@ -10,6 +10,7 @@
3355 * ASM optimised string functions
3356 */
3357 #include <linux/linkage.h>
3358+#include <linux/const.h>
3359 #include <asm/assembler.h>
3360 #include <asm/asm-offsets.h>
3361 #include <asm/cache.h>
3362diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3363index a9d3db1..164b089 100644
3364--- a/arch/arm/lib/copy_to_user.S
3365+++ b/arch/arm/lib/copy_to_user.S
3366@@ -17,7 +17,7 @@
3367 /*
3368 * Prototype:
3369 *
3370- * size_t __copy_to_user(void *to, const void *from, size_t n)
3371+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3372 *
3373 * Purpose:
3374 *
3375@@ -93,11 +93,11 @@
3376 .text
3377
3378 ENTRY(__copy_to_user_std)
3379-WEAK(__copy_to_user)
3380+WEAK(___copy_to_user)
3381
3382 #include "copy_template.S"
3383
3384-ENDPROC(__copy_to_user)
3385+ENDPROC(___copy_to_user)
3386 ENDPROC(__copy_to_user_std)
3387
3388 .pushsection .fixup,"ax"
3389diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3390index 7d08b43..f7ca7ea 100644
3391--- a/arch/arm/lib/csumpartialcopyuser.S
3392+++ b/arch/arm/lib/csumpartialcopyuser.S
3393@@ -57,8 +57,8 @@
3394 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3395 */
3396
3397-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3398-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3399+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3400+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3401
3402 #include "csumpartialcopygeneric.S"
3403
3404diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3405index 312d43e..21d2322 100644
3406--- a/arch/arm/lib/delay.c
3407+++ b/arch/arm/lib/delay.c
3408@@ -29,7 +29,7 @@
3409 /*
3410 * Default to the loop-based delay implementation.
3411 */
3412-struct arm_delay_ops arm_delay_ops = {
3413+struct arm_delay_ops arm_delay_ops __read_only = {
3414 .delay = __loop_delay,
3415 .const_udelay = __loop_const_udelay,
3416 .udelay = __loop_udelay,
3417diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3418index 3e58d71..029817c 100644
3419--- a/arch/arm/lib/uaccess_with_memcpy.c
3420+++ b/arch/arm/lib/uaccess_with_memcpy.c
3421@@ -136,7 +136,7 @@ out:
3422 }
3423
3424 unsigned long
3425-__copy_to_user(void __user *to, const void *from, unsigned long n)
3426+___copy_to_user(void __user *to, const void *from, unsigned long n)
3427 {
3428 /*
3429 * This test is stubbed out of the main function above to keep
3430@@ -190,7 +190,7 @@ out:
3431 return n;
3432 }
3433
3434-unsigned long __clear_user(void __user *addr, unsigned long n)
3435+unsigned long ___clear_user(void __user *addr, unsigned long n)
3436 {
3437 /* See rational for this in __copy_to_user() above. */
3438 if (n < 64)
3439diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
3440index 582ef2d..d314e82 100644
3441--- a/arch/arm/mach-exynos/suspend.c
3442+++ b/arch/arm/mach-exynos/suspend.c
3443@@ -18,6 +18,7 @@
3444 #include <linux/syscore_ops.h>
3445 #include <linux/cpu_pm.h>
3446 #include <linux/io.h>
3447+#include <linux/irq.h>
3448 #include <linux/irqchip/arm-gic.h>
3449 #include <linux/err.h>
3450 #include <linux/regulator/machine.h>
3451@@ -635,8 +636,10 @@ void __init exynos_pm_init(void)
3452 tmp |= pm_data->wake_disable_mask;
3453 pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
3454
3455- exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3456- exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3457+ pax_open_kernel();
3458+ *(void **)&exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3459+ *(void **)&exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3460+ pax_close_kernel();
3461
3462 register_syscore_ops(&exynos_pm_syscore_ops);
3463 suspend_set_ops(&exynos_suspend_ops);
3464diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
3465index 0662087..004d163 100644
3466--- a/arch/arm/mach-keystone/keystone.c
3467+++ b/arch/arm/mach-keystone/keystone.c
3468@@ -27,7 +27,7 @@
3469
3470 #include "keystone.h"
3471
3472-static struct notifier_block platform_nb;
3473+static notifier_block_no_const platform_nb;
3474 static unsigned long keystone_dma_pfn_offset __read_mostly;
3475
3476 static int keystone_platform_notifier(struct notifier_block *nb,
3477diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
3478index e46e9ea..9141c83 100644
3479--- a/arch/arm/mach-mvebu/coherency.c
3480+++ b/arch/arm/mach-mvebu/coherency.c
3481@@ -117,7 +117,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
3482
3483 /*
3484 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
3485- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
3486+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
3487 * is needed as a workaround for a deadlock issue between the PCIe
3488 * interface and the cache controller.
3489 */
3490@@ -130,7 +130,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
3491 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
3492
3493 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
3494- mtype = MT_UNCACHED;
3495+ mtype = MT_UNCACHED_RW;
3496
3497 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
3498 }
3499diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3500index b6443a4..20a0b74 100644
3501--- a/arch/arm/mach-omap2/board-n8x0.c
3502+++ b/arch/arm/mach-omap2/board-n8x0.c
3503@@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3504 }
3505 #endif
3506
3507-struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3508+struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3509 .late_init = n8x0_menelaus_late_init,
3510 };
3511
3512diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3513index 79f49d9..70bf184 100644
3514--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3515+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3516@@ -86,7 +86,7 @@ struct cpu_pm_ops {
3517 void (*resume)(void);
3518 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3519 void (*hotplug_restart)(void);
3520-};
3521+} __no_const;
3522
3523 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3524 static struct powerdomain *mpuss_pd;
3525@@ -105,7 +105,7 @@ static void dummy_cpu_resume(void)
3526 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3527 {}
3528
3529-struct cpu_pm_ops omap_pm_ops = {
3530+static struct cpu_pm_ops omap_pm_ops __read_only = {
3531 .finish_suspend = default_finish_suspend,
3532 .resume = dummy_cpu_resume,
3533 .scu_prepare = dummy_scu_prepare,
3534diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
3535index 5305ec7..6d74045 100644
3536--- a/arch/arm/mach-omap2/omap-smp.c
3537+++ b/arch/arm/mach-omap2/omap-smp.c
3538@@ -19,6 +19,7 @@
3539 #include <linux/device.h>
3540 #include <linux/smp.h>
3541 #include <linux/io.h>
3542+#include <linux/irq.h>
3543 #include <linux/irqchip/arm-gic.h>
3544
3545 #include <asm/smp_scu.h>
3546diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3547index f961c46..4a453dc 100644
3548--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3549+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3550@@ -344,7 +344,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3551 return NOTIFY_OK;
3552 }
3553
3554-static struct notifier_block __refdata irq_hotplug_notifier = {
3555+static struct notifier_block irq_hotplug_notifier = {
3556 .notifier_call = irq_cpu_hotplug_notify,
3557 };
3558
3559diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3560index be9541e..821805f 100644
3561--- a/arch/arm/mach-omap2/omap_device.c
3562+++ b/arch/arm/mach-omap2/omap_device.c
3563@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
3564 struct platform_device __init *omap_device_build(const char *pdev_name,
3565 int pdev_id,
3566 struct omap_hwmod *oh,
3567- void *pdata, int pdata_len)
3568+ const void *pdata, int pdata_len)
3569 {
3570 struct omap_hwmod *ohs[] = { oh };
3571
3572@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3573 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3574 int pdev_id,
3575 struct omap_hwmod **ohs,
3576- int oh_cnt, void *pdata,
3577+ int oh_cnt, const void *pdata,
3578 int pdata_len)
3579 {
3580 int ret = -ENOMEM;
3581diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3582index 78c02b3..c94109a 100644
3583--- a/arch/arm/mach-omap2/omap_device.h
3584+++ b/arch/arm/mach-omap2/omap_device.h
3585@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3586 /* Core code interface */
3587
3588 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3589- struct omap_hwmod *oh, void *pdata,
3590+ struct omap_hwmod *oh, const void *pdata,
3591 int pdata_len);
3592
3593 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3594 struct omap_hwmod **oh, int oh_cnt,
3595- void *pdata, int pdata_len);
3596+ const void *pdata, int pdata_len);
3597
3598 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3599 struct omap_hwmod **ohs, int oh_cnt);
3600diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3601index 355b089..2c9d7c3 100644
3602--- a/arch/arm/mach-omap2/omap_hwmod.c
3603+++ b/arch/arm/mach-omap2/omap_hwmod.c
3604@@ -193,10 +193,10 @@ struct omap_hwmod_soc_ops {
3605 int (*init_clkdm)(struct omap_hwmod *oh);
3606 void (*update_context_lost)(struct omap_hwmod *oh);
3607 int (*get_context_lost)(struct omap_hwmod *oh);
3608-};
3609+} __no_const;
3610
3611 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3612-static struct omap_hwmod_soc_ops soc_ops;
3613+static struct omap_hwmod_soc_ops soc_ops __read_only;
3614
3615 /* omap_hwmod_list contains all registered struct omap_hwmods */
3616 static LIST_HEAD(omap_hwmod_list);
3617diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3618index 95fee54..cfa9cf1 100644
3619--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3620+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3621@@ -10,6 +10,7 @@
3622
3623 #include <linux/kernel.h>
3624 #include <linux/init.h>
3625+#include <asm/pgtable.h>
3626
3627 #include "powerdomain.h"
3628
3629@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3630
3631 void __init am43xx_powerdomains_init(void)
3632 {
3633- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3634+ pax_open_kernel();
3635+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3636+ pax_close_kernel();
3637 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3638 pwrdm_register_pwrdms(powerdomains_am43xx);
3639 pwrdm_complete_init();
3640diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3641index ff0a68c..b312aa0 100644
3642--- a/arch/arm/mach-omap2/wd_timer.c
3643+++ b/arch/arm/mach-omap2/wd_timer.c
3644@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3645 struct omap_hwmod *oh;
3646 char *oh_name = "wd_timer2";
3647 char *dev_name = "omap_wdt";
3648- struct omap_wd_timer_platform_data pdata;
3649+ static struct omap_wd_timer_platform_data pdata = {
3650+ .read_reset_sources = prm_read_reset_sources
3651+ };
3652
3653 if (!cpu_class_is_omap2() || of_have_populated_dt())
3654 return 0;
3655@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3656 return -EINVAL;
3657 }
3658
3659- pdata.read_reset_sources = prm_read_reset_sources;
3660-
3661 pdev = omap_device_build(dev_name, id, oh, &pdata,
3662 sizeof(struct omap_wd_timer_platform_data));
3663 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3664diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3665index a351eff..87baad9 100644
3666--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3667+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3668@@ -178,7 +178,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3669 bool entered_lp2 = false;
3670
3671 if (tegra_pending_sgi())
3672- ACCESS_ONCE(abort_flag) = true;
3673+ ACCESS_ONCE_RW(abort_flag) = true;
3674
3675 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3676
3677diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
3678index ab95f53..4b977a7 100644
3679--- a/arch/arm/mach-tegra/irq.c
3680+++ b/arch/arm/mach-tegra/irq.c
3681@@ -20,6 +20,7 @@
3682 #include <linux/cpu_pm.h>
3683 #include <linux/interrupt.h>
3684 #include <linux/io.h>
3685+#include <linux/irq.h>
3686 #include <linux/irqchip/arm-gic.h>
3687 #include <linux/irq.h>
3688 #include <linux/kernel.h>
3689diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
3690index 2cb587b..6ddfebf 100644
3691--- a/arch/arm/mach-ux500/pm.c
3692+++ b/arch/arm/mach-ux500/pm.c
3693@@ -10,6 +10,7 @@
3694 */
3695
3696 #include <linux/kernel.h>
3697+#include <linux/irq.h>
3698 #include <linux/irqchip/arm-gic.h>
3699 #include <linux/delay.h>
3700 #include <linux/io.h>
3701diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3702index 2dea8b5..6499da2 100644
3703--- a/arch/arm/mach-ux500/setup.h
3704+++ b/arch/arm/mach-ux500/setup.h
3705@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
3706 .type = MT_DEVICE, \
3707 }
3708
3709-#define __MEM_DEV_DESC(x, sz) { \
3710- .virtual = IO_ADDRESS(x), \
3711- .pfn = __phys_to_pfn(x), \
3712- .length = sz, \
3713- .type = MT_MEMORY_RWX, \
3714-}
3715-
3716 extern struct smp_operations ux500_smp_ops;
3717 extern void ux500_cpu_die(unsigned int cpu);
3718
3719diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
3720index 52d768f..5f93180 100644
3721--- a/arch/arm/mach-zynq/platsmp.c
3722+++ b/arch/arm/mach-zynq/platsmp.c
3723@@ -24,6 +24,7 @@
3724 #include <linux/io.h>
3725 #include <asm/cacheflush.h>
3726 #include <asm/smp_scu.h>
3727+#include <linux/irq.h>
3728 #include <linux/irqchip/arm-gic.h>
3729 #include "common.h"
3730
3731diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3732index 9b4f29e..bbf3bfa 100644
3733--- a/arch/arm/mm/Kconfig
3734+++ b/arch/arm/mm/Kconfig
3735@@ -446,6 +446,7 @@ config CPU_32v5
3736
3737 config CPU_32v6
3738 bool
3739+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3740 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3741
3742 config CPU_32v6K
3743@@ -600,6 +601,7 @@ config CPU_CP15_MPU
3744
3745 config CPU_USE_DOMAINS
3746 bool
3747+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3748 help
3749 This option enables or disables the use of domain switching
3750 via the set_fs() function.
3751@@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS
3752
3753 config KUSER_HELPERS
3754 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3755- depends on MMU
3756+ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
3757 default y
3758 help
3759 Warning: disabling this option may break user programs.
3760@@ -812,7 +814,7 @@ config KUSER_HELPERS
3761 See Documentation/arm/kernel_user_helpers.txt for details.
3762
3763 However, the fixed address nature of these helpers can be used
3764- by ROP (return orientated programming) authors when creating
3765+ by ROP (Return Oriented Programming) authors when creating
3766 exploits.
3767
3768 If all of the binaries and libraries which run on your platform
3769diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3770index 2c0c541..4585df9 100644
3771--- a/arch/arm/mm/alignment.c
3772+++ b/arch/arm/mm/alignment.c
3773@@ -216,10 +216,12 @@ union offset_union {
3774 #define __get16_unaligned_check(ins,val,addr) \
3775 do { \
3776 unsigned int err = 0, v, a = addr; \
3777+ pax_open_userland(); \
3778 __get8_unaligned_check(ins,v,a,err); \
3779 val = v << ((BE) ? 8 : 0); \
3780 __get8_unaligned_check(ins,v,a,err); \
3781 val |= v << ((BE) ? 0 : 8); \
3782+ pax_close_userland(); \
3783 if (err) \
3784 goto fault; \
3785 } while (0)
3786@@ -233,6 +235,7 @@ union offset_union {
3787 #define __get32_unaligned_check(ins,val,addr) \
3788 do { \
3789 unsigned int err = 0, v, a = addr; \
3790+ pax_open_userland(); \
3791 __get8_unaligned_check(ins,v,a,err); \
3792 val = v << ((BE) ? 24 : 0); \
3793 __get8_unaligned_check(ins,v,a,err); \
3794@@ -241,6 +244,7 @@ union offset_union {
3795 val |= v << ((BE) ? 8 : 16); \
3796 __get8_unaligned_check(ins,v,a,err); \
3797 val |= v << ((BE) ? 0 : 24); \
3798+ pax_close_userland(); \
3799 if (err) \
3800 goto fault; \
3801 } while (0)
3802@@ -254,6 +258,7 @@ union offset_union {
3803 #define __put16_unaligned_check(ins,val,addr) \
3804 do { \
3805 unsigned int err = 0, v = val, a = addr; \
3806+ pax_open_userland(); \
3807 __asm__( FIRST_BYTE_16 \
3808 ARM( "1: "ins" %1, [%2], #1\n" ) \
3809 THUMB( "1: "ins" %1, [%2]\n" ) \
3810@@ -273,6 +278,7 @@ union offset_union {
3811 " .popsection\n" \
3812 : "=r" (err), "=&r" (v), "=&r" (a) \
3813 : "0" (err), "1" (v), "2" (a)); \
3814+ pax_close_userland(); \
3815 if (err) \
3816 goto fault; \
3817 } while (0)
3818@@ -286,6 +292,7 @@ union offset_union {
3819 #define __put32_unaligned_check(ins,val,addr) \
3820 do { \
3821 unsigned int err = 0, v = val, a = addr; \
3822+ pax_open_userland(); \
3823 __asm__( FIRST_BYTE_32 \
3824 ARM( "1: "ins" %1, [%2], #1\n" ) \
3825 THUMB( "1: "ins" %1, [%2]\n" ) \
3826@@ -315,6 +322,7 @@ union offset_union {
3827 " .popsection\n" \
3828 : "=r" (err), "=&r" (v), "=&r" (a) \
3829 : "0" (err), "1" (v), "2" (a)); \
3830+ pax_close_userland(); \
3831 if (err) \
3832 goto fault; \
3833 } while (0)
3834diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3835index 8f15f70..d599a2b 100644
3836--- a/arch/arm/mm/cache-l2x0.c
3837+++ b/arch/arm/mm/cache-l2x0.c
3838@@ -43,7 +43,7 @@ struct l2c_init_data {
3839 void (*save)(void __iomem *);
3840 void (*configure)(void __iomem *);
3841 struct outer_cache_fns outer_cache;
3842-};
3843+} __do_const;
3844
3845 #define CACHE_LINE_SIZE 32
3846
3847diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3848index 845769e..4278fd7 100644
3849--- a/arch/arm/mm/context.c
3850+++ b/arch/arm/mm/context.c
3851@@ -43,7 +43,7 @@
3852 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3853
3854 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3855-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3856+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3857 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3858
3859 static DEFINE_PER_CPU(atomic64_t, active_asids);
3860@@ -178,7 +178,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3861 {
3862 static u32 cur_idx = 1;
3863 u64 asid = atomic64_read(&mm->context.id);
3864- u64 generation = atomic64_read(&asid_generation);
3865+ u64 generation = atomic64_read_unchecked(&asid_generation);
3866
3867 if (asid != 0) {
3868 /*
3869@@ -208,7 +208,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3870 */
3871 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
3872 if (asid == NUM_USER_ASIDS) {
3873- generation = atomic64_add_return(ASID_FIRST_VERSION,
3874+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3875 &asid_generation);
3876 flush_context(cpu);
3877 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3878@@ -240,14 +240,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3879 cpu_set_reserved_ttbr0();
3880
3881 asid = atomic64_read(&mm->context.id);
3882- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3883+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3884 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3885 goto switch_mm_fastpath;
3886
3887 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3888 /* Check that our ASID belongs to the current generation. */
3889 asid = atomic64_read(&mm->context.id);
3890- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3891+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3892 asid = new_context(mm, cpu);
3893 atomic64_set(&mm->context.id, asid);
3894 }
3895diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3896index 6333d9c..3bb19f2 100644
3897--- a/arch/arm/mm/fault.c
3898+++ b/arch/arm/mm/fault.c
3899@@ -25,6 +25,7 @@
3900 #include <asm/system_misc.h>
3901 #include <asm/system_info.h>
3902 #include <asm/tlbflush.h>
3903+#include <asm/sections.h>
3904
3905 #include "fault.h"
3906
3907@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3908 if (fixup_exception(regs))
3909 return;
3910
3911+#ifdef CONFIG_PAX_MEMORY_UDEREF
3912+ if (addr < TASK_SIZE) {
3913+ if (current->signal->curr_ip)
3914+ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3915+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3916+ else
3917+ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3918+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3919+ }
3920+#endif
3921+
3922+#ifdef CONFIG_PAX_KERNEXEC
3923+ if ((fsr & FSR_WRITE) &&
3924+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3925+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3926+ {
3927+ if (current->signal->curr_ip)
3928+ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3929+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3930+ else
3931+ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3932+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3933+ }
3934+#endif
3935+
3936 /*
3937 * No handler, we'll have to terminate things with extreme prejudice.
3938 */
3939@@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3940 }
3941 #endif
3942
3943+#ifdef CONFIG_PAX_PAGEEXEC
3944+ if (fsr & FSR_LNX_PF) {
3945+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3946+ do_group_exit(SIGKILL);
3947+ }
3948+#endif
3949+
3950 tsk->thread.address = addr;
3951 tsk->thread.error_code = fsr;
3952 tsk->thread.trap_no = 14;
3953@@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3954 }
3955 #endif /* CONFIG_MMU */
3956
3957+#ifdef CONFIG_PAX_PAGEEXEC
3958+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3959+{
3960+ long i;
3961+
3962+ printk(KERN_ERR "PAX: bytes at PC: ");
3963+ for (i = 0; i < 20; i++) {
3964+ unsigned char c;
3965+ if (get_user(c, (__force unsigned char __user *)pc+i))
3966+ printk(KERN_CONT "?? ");
3967+ else
3968+ printk(KERN_CONT "%02x ", c);
3969+ }
3970+ printk("\n");
3971+
3972+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3973+ for (i = -1; i < 20; i++) {
3974+ unsigned long c;
3975+ if (get_user(c, (__force unsigned long __user *)sp+i))
3976+ printk(KERN_CONT "???????? ");
3977+ else
3978+ printk(KERN_CONT "%08lx ", c);
3979+ }
3980+ printk("\n");
3981+}
3982+#endif
3983+
3984 /*
3985 * First Level Translation Fault Handler
3986 *
3987@@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3988 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3989 struct siginfo info;
3990
3991+#ifdef CONFIG_PAX_MEMORY_UDEREF
3992+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3993+ if (current->signal->curr_ip)
3994+ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3995+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3996+ else
3997+ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3998+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3999+ goto die;
4000+ }
4001+#endif
4002+
4003 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4004 return;
4005
4006+die:
4007 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4008 inf->name, fsr, addr);
4009 show_pte(current->mm, addr);
4010@@ -574,15 +647,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4011 ifsr_info[nr].name = name;
4012 }
4013
4014+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4015+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4016+
4017 asmlinkage void __exception
4018 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4019 {
4020 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4021 struct siginfo info;
4022+ unsigned long pc = instruction_pointer(regs);
4023+
4024+ if (user_mode(regs)) {
4025+ unsigned long sigpage = current->mm->context.sigpage;
4026+
4027+ if (sigpage <= pc && pc < sigpage + 7*4) {
4028+ if (pc < sigpage + 3*4)
4029+ sys_sigreturn(regs);
4030+ else
4031+ sys_rt_sigreturn(regs);
4032+ return;
4033+ }
4034+ if (pc == 0xffff0f60UL) {
4035+ /*
4036+ * PaX: __kuser_cmpxchg64 emulation
4037+ */
4038+ // TODO
4039+ //regs->ARM_pc = regs->ARM_lr;
4040+ //return;
4041+ }
4042+ if (pc == 0xffff0fa0UL) {
4043+ /*
4044+ * PaX: __kuser_memory_barrier emulation
4045+ */
4046+ // dmb(); implied by the exception
4047+ regs->ARM_pc = regs->ARM_lr;
4048+ return;
4049+ }
4050+ if (pc == 0xffff0fc0UL) {
4051+ /*
4052+ * PaX: __kuser_cmpxchg emulation
4053+ */
4054+ // TODO
4055+ //long new;
4056+ //int op;
4057+
4058+ //op = FUTEX_OP_SET << 28;
4059+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4060+ //regs->ARM_r0 = old != new;
4061+ //regs->ARM_pc = regs->ARM_lr;
4062+ //return;
4063+ }
4064+ if (pc == 0xffff0fe0UL) {
4065+ /*
4066+ * PaX: __kuser_get_tls emulation
4067+ */
4068+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4069+ regs->ARM_pc = regs->ARM_lr;
4070+ return;
4071+ }
4072+ }
4073+
4074+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4075+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4076+ if (current->signal->curr_ip)
4077+ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4078+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4079+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4080+ else
4081+ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4082+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4083+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4084+ goto die;
4085+ }
4086+#endif
4087+
4088+#ifdef CONFIG_PAX_REFCOUNT
4089+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4090+#ifdef CONFIG_THUMB2_KERNEL
4091+ unsigned short bkpt;
4092+
4093+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
4094+#else
4095+ unsigned int bkpt;
4096+
4097+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4098+#endif
4099+ current->thread.error_code = ifsr;
4100+ current->thread.trap_no = 0;
4101+ pax_report_refcount_overflow(regs);
4102+ fixup_exception(regs);
4103+ return;
4104+ }
4105+ }
4106+#endif
4107
4108 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4109 return;
4110
4111+die:
4112 pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4113 inf->name, ifsr, addr);
4114
4115diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4116index cf08bdf..772656c 100644
4117--- a/arch/arm/mm/fault.h
4118+++ b/arch/arm/mm/fault.h
4119@@ -3,6 +3,7 @@
4120
4121 /*
4122 * Fault status register encodings. We steal bit 31 for our own purposes.
4123+ * Set when the FSR value is from an instruction fault.
4124 */
4125 #define FSR_LNX_PF (1 << 31)
4126 #define FSR_WRITE (1 << 11)
4127@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4128 }
4129 #endif
4130
4131+/* valid for LPAE and !LPAE */
4132+static inline int is_xn_fault(unsigned int fsr)
4133+{
4134+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4135+}
4136+
4137+static inline int is_domain_fault(unsigned int fsr)
4138+{
4139+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4140+}
4141+
4142 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4143 unsigned long search_exception_table(unsigned long addr);
4144
4145diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4146index 1609b02..def0785 100644
4147--- a/arch/arm/mm/init.c
4148+++ b/arch/arm/mm/init.c
4149@@ -755,7 +755,46 @@ void free_tcmmem(void)
4150 {
4151 #ifdef CONFIG_HAVE_TCM
4152 extern char __tcm_start, __tcm_end;
4153+#endif
4154
4155+#ifdef CONFIG_PAX_KERNEXEC
4156+ unsigned long addr;
4157+ pgd_t *pgd;
4158+ pud_t *pud;
4159+ pmd_t *pmd;
4160+ int cpu_arch = cpu_architecture();
4161+ unsigned int cr = get_cr();
4162+
4163+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4164+ /* make pages tables, etc before .text NX */
4165+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4166+ pgd = pgd_offset_k(addr);
4167+ pud = pud_offset(pgd, addr);
4168+ pmd = pmd_offset(pud, addr);
4169+ __section_update(pmd, addr, PMD_SECT_XN);
4170+ }
4171+ /* make init NX */
4172+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4173+ pgd = pgd_offset_k(addr);
4174+ pud = pud_offset(pgd, addr);
4175+ pmd = pmd_offset(pud, addr);
4176+ __section_update(pmd, addr, PMD_SECT_XN);
4177+ }
4178+ /* make kernel code/rodata RX */
4179+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4180+ pgd = pgd_offset_k(addr);
4181+ pud = pud_offset(pgd, addr);
4182+ pmd = pmd_offset(pud, addr);
4183+#ifdef CONFIG_ARM_LPAE
4184+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4185+#else
4186+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4187+#endif
4188+ }
4189+ }
4190+#endif
4191+
4192+#ifdef CONFIG_HAVE_TCM
4193 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4194 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4195 #endif
4196diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4197index d1e5ad7..84dcbf2 100644
4198--- a/arch/arm/mm/ioremap.c
4199+++ b/arch/arm/mm/ioremap.c
4200@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4201 unsigned int mtype;
4202
4203 if (cached)
4204- mtype = MT_MEMORY_RWX;
4205+ mtype = MT_MEMORY_RX;
4206 else
4207- mtype = MT_MEMORY_RWX_NONCACHED;
4208+ mtype = MT_MEMORY_RX_NONCACHED;
4209
4210 return __arm_ioremap_caller(phys_addr, size, mtype,
4211 __builtin_return_address(0));
4212diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4213index 5e85ed3..b10a7ed 100644
4214--- a/arch/arm/mm/mmap.c
4215+++ b/arch/arm/mm/mmap.c
4216@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4217 struct vm_area_struct *vma;
4218 int do_align = 0;
4219 int aliasing = cache_is_vipt_aliasing();
4220+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4221 struct vm_unmapped_area_info info;
4222
4223 /*
4224@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4225 if (len > TASK_SIZE)
4226 return -ENOMEM;
4227
4228+#ifdef CONFIG_PAX_RANDMMAP
4229+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4230+#endif
4231+
4232 if (addr) {
4233 if (do_align)
4234 addr = COLOUR_ALIGN(addr, pgoff);
4235@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4236 addr = PAGE_ALIGN(addr);
4237
4238 vma = find_vma(mm, addr);
4239- if (TASK_SIZE - len >= addr &&
4240- (!vma || addr + len <= vma->vm_start))
4241+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4242 return addr;
4243 }
4244
4245@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4246 info.high_limit = TASK_SIZE;
4247 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4248 info.align_offset = pgoff << PAGE_SHIFT;
4249+ info.threadstack_offset = offset;
4250 return vm_unmapped_area(&info);
4251 }
4252
4253@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4254 unsigned long addr = addr0;
4255 int do_align = 0;
4256 int aliasing = cache_is_vipt_aliasing();
4257+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4258 struct vm_unmapped_area_info info;
4259
4260 /*
4261@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4262 return addr;
4263 }
4264
4265+#ifdef CONFIG_PAX_RANDMMAP
4266+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4267+#endif
4268+
4269 /* requesting a specific address */
4270 if (addr) {
4271 if (do_align)
4272@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4273 else
4274 addr = PAGE_ALIGN(addr);
4275 vma = find_vma(mm, addr);
4276- if (TASK_SIZE - len >= addr &&
4277- (!vma || addr + len <= vma->vm_start))
4278+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4279 return addr;
4280 }
4281
4282@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4283 info.high_limit = mm->mmap_base;
4284 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4285 info.align_offset = pgoff << PAGE_SHIFT;
4286+ info.threadstack_offset = offset;
4287 addr = vm_unmapped_area(&info);
4288
4289 /*
4290@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4291 {
4292 unsigned long random_factor = 0UL;
4293
4294+#ifdef CONFIG_PAX_RANDMMAP
4295+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4296+#endif
4297+
4298 /* 8 bits of randomness in 20 address space bits */
4299 if ((current->flags & PF_RANDOMIZE) &&
4300 !(current->personality & ADDR_NO_RANDOMIZE))
4301@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4302
4303 if (mmap_is_legacy()) {
4304 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4305+
4306+#ifdef CONFIG_PAX_RANDMMAP
4307+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4308+ mm->mmap_base += mm->delta_mmap;
4309+#endif
4310+
4311 mm->get_unmapped_area = arch_get_unmapped_area;
4312 } else {
4313 mm->mmap_base = mmap_base(random_factor);
4314+
4315+#ifdef CONFIG_PAX_RANDMMAP
4316+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4317+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4318+#endif
4319+
4320 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4321 }
4322 }
4323diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4324index 7186382..0c145cf 100644
4325--- a/arch/arm/mm/mmu.c
4326+++ b/arch/arm/mm/mmu.c
4327@@ -41,6 +41,22 @@
4328 #include "mm.h"
4329 #include "tcm.h"
4330
4331+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4332+void modify_domain(unsigned int dom, unsigned int type)
4333+{
4334+ struct thread_info *thread = current_thread_info();
4335+ unsigned int domain = thread->cpu_domain;
4336+ /*
4337+ * DOMAIN_MANAGER might be defined to some other value,
4338+ * use the arch-defined constant
4339+ */
4340+ domain &= ~domain_val(dom, 3);
4341+ thread->cpu_domain = domain | domain_val(dom, type);
4342+ set_domain(thread->cpu_domain);
4343+}
4344+EXPORT_SYMBOL(modify_domain);
4345+#endif
4346+
4347 /*
4348 * empty_zero_page is a special page that is used for
4349 * zero-initialized data and COW.
4350@@ -242,7 +258,15 @@ __setup("noalign", noalign_setup);
4351 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4352 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4353
4354-static struct mem_type mem_types[] = {
4355+#ifdef CONFIG_PAX_KERNEXEC
4356+#define L_PTE_KERNEXEC L_PTE_RDONLY
4357+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4358+#else
4359+#define L_PTE_KERNEXEC L_PTE_DIRTY
4360+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4361+#endif
4362+
4363+static struct mem_type mem_types[] __read_only = {
4364 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4365 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4366 L_PTE_SHARED,
4367@@ -271,19 +295,19 @@ static struct mem_type mem_types[] = {
4368 .prot_sect = PROT_SECT_DEVICE,
4369 .domain = DOMAIN_IO,
4370 },
4371- [MT_UNCACHED] = {
4372+ [MT_UNCACHED_RW] = {
4373 .prot_pte = PROT_PTE_DEVICE,
4374 .prot_l1 = PMD_TYPE_TABLE,
4375 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4376 .domain = DOMAIN_IO,
4377 },
4378- [MT_CACHECLEAN] = {
4379- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4380+ [MT_CACHECLEAN_RO] = {
4381+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4382 .domain = DOMAIN_KERNEL,
4383 },
4384 #ifndef CONFIG_ARM_LPAE
4385- [MT_MINICLEAN] = {
4386- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4387+ [MT_MINICLEAN_RO] = {
4388+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4389 .domain = DOMAIN_KERNEL,
4390 },
4391 #endif
4392@@ -291,15 +315,15 @@ static struct mem_type mem_types[] = {
4393 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4394 L_PTE_RDONLY,
4395 .prot_l1 = PMD_TYPE_TABLE,
4396- .domain = DOMAIN_USER,
4397+ .domain = DOMAIN_VECTORS,
4398 },
4399 [MT_HIGH_VECTORS] = {
4400 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4401 L_PTE_USER | L_PTE_RDONLY,
4402 .prot_l1 = PMD_TYPE_TABLE,
4403- .domain = DOMAIN_USER,
4404+ .domain = DOMAIN_VECTORS,
4405 },
4406- [MT_MEMORY_RWX] = {
4407+ [__MT_MEMORY_RWX] = {
4408 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4409 .prot_l1 = PMD_TYPE_TABLE,
4410 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4411@@ -312,17 +336,30 @@ static struct mem_type mem_types[] = {
4412 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4413 .domain = DOMAIN_KERNEL,
4414 },
4415- [MT_ROM] = {
4416- .prot_sect = PMD_TYPE_SECT,
4417+ [MT_MEMORY_RX] = {
4418+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4419+ .prot_l1 = PMD_TYPE_TABLE,
4420+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4421+ .domain = DOMAIN_KERNEL,
4422+ },
4423+ [MT_ROM_RX] = {
4424+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4425 .domain = DOMAIN_KERNEL,
4426 },
4427- [MT_MEMORY_RWX_NONCACHED] = {
4428+ [MT_MEMORY_RW_NONCACHED] = {
4429 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4430 L_PTE_MT_BUFFERABLE,
4431 .prot_l1 = PMD_TYPE_TABLE,
4432 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4433 .domain = DOMAIN_KERNEL,
4434 },
4435+ [MT_MEMORY_RX_NONCACHED] = {
4436+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4437+ L_PTE_MT_BUFFERABLE,
4438+ .prot_l1 = PMD_TYPE_TABLE,
4439+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4440+ .domain = DOMAIN_KERNEL,
4441+ },
4442 [MT_MEMORY_RW_DTCM] = {
4443 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4444 L_PTE_XN,
4445@@ -330,9 +367,10 @@ static struct mem_type mem_types[] = {
4446 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4447 .domain = DOMAIN_KERNEL,
4448 },
4449- [MT_MEMORY_RWX_ITCM] = {
4450- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4451+ [MT_MEMORY_RX_ITCM] = {
4452+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4453 .prot_l1 = PMD_TYPE_TABLE,
4454+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4455 .domain = DOMAIN_KERNEL,
4456 },
4457 [MT_MEMORY_RW_SO] = {
4458@@ -544,9 +582,14 @@ static void __init build_mem_type_table(void)
4459 * Mark cache clean areas and XIP ROM read only
4460 * from SVC mode and no access from userspace.
4461 */
4462- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4463- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4464- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4465+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4466+#ifdef CONFIG_PAX_KERNEXEC
4467+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4468+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4469+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4470+#endif
4471+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4472+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4473 #endif
4474
4475 /*
4476@@ -563,13 +606,17 @@ static void __init build_mem_type_table(void)
4477 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4478 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4479 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4480- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4481- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4482+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4483+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4484 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4485 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4486+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4487+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4488 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4489- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4490- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4491+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4492+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4493+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4494+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4495 }
4496 }
4497
4498@@ -580,15 +627,20 @@ static void __init build_mem_type_table(void)
4499 if (cpu_arch >= CPU_ARCH_ARMv6) {
4500 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4501 /* Non-cacheable Normal is XCB = 001 */
4502- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4503+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4504+ PMD_SECT_BUFFERED;
4505+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4506 PMD_SECT_BUFFERED;
4507 } else {
4508 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4509- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4510+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4511+ PMD_SECT_TEX(1);
4512+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4513 PMD_SECT_TEX(1);
4514 }
4515 } else {
4516- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4517+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4518+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4519 }
4520
4521 #ifdef CONFIG_ARM_LPAE
4522@@ -609,6 +661,8 @@ static void __init build_mem_type_table(void)
4523 user_pgprot |= PTE_EXT_PXN;
4524 #endif
4525
4526+ user_pgprot |= __supported_pte_mask;
4527+
4528 for (i = 0; i < 16; i++) {
4529 pteval_t v = pgprot_val(protection_map[i]);
4530 protection_map[i] = __pgprot(v | user_pgprot);
4531@@ -626,21 +680,24 @@ static void __init build_mem_type_table(void)
4532
4533 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4534 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4535- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4536- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4537+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4538+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4539 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4540 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4541+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4542+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4543 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4544- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4545- mem_types[MT_ROM].prot_sect |= cp->pmd;
4546+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4547+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4548+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4549
4550 switch (cp->pmd) {
4551 case PMD_SECT_WT:
4552- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4553+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4554 break;
4555 case PMD_SECT_WB:
4556 case PMD_SECT_WBWA:
4557- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4558+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4559 break;
4560 }
4561 pr_info("Memory policy: %sData cache %s\n",
4562@@ -854,7 +911,7 @@ static void __init create_mapping(struct map_desc *md)
4563 return;
4564 }
4565
4566- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4567+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4568 md->virtual >= PAGE_OFFSET &&
4569 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4570 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
4571@@ -1218,18 +1275,15 @@ void __init arm_mm_memblock_reserve(void)
4572 * called function. This means you can't use any function or debugging
4573 * method which may touch any device, otherwise the kernel _will_ crash.
4574 */
4575+
4576+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4577+
4578 static void __init devicemaps_init(const struct machine_desc *mdesc)
4579 {
4580 struct map_desc map;
4581 unsigned long addr;
4582- void *vectors;
4583
4584- /*
4585- * Allocate the vector page early.
4586- */
4587- vectors = early_alloc(PAGE_SIZE * 2);
4588-
4589- early_trap_init(vectors);
4590+ early_trap_init(&vectors);
4591
4592 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4593 pmd_clear(pmd_off_k(addr));
4594@@ -1242,7 +1296,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4595 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4596 map.virtual = MODULES_VADDR;
4597 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4598- map.type = MT_ROM;
4599+ map.type = MT_ROM_RX;
4600 create_mapping(&map);
4601 #endif
4602
4603@@ -1253,14 +1307,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4604 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4605 map.virtual = FLUSH_BASE;
4606 map.length = SZ_1M;
4607- map.type = MT_CACHECLEAN;
4608+ map.type = MT_CACHECLEAN_RO;
4609 create_mapping(&map);
4610 #endif
4611 #ifdef FLUSH_BASE_MINICACHE
4612 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4613 map.virtual = FLUSH_BASE_MINICACHE;
4614 map.length = SZ_1M;
4615- map.type = MT_MINICLEAN;
4616+ map.type = MT_MINICLEAN_RO;
4617 create_mapping(&map);
4618 #endif
4619
4620@@ -1269,7 +1323,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4621 * location (0xffff0000). If we aren't using high-vectors, also
4622 * create a mapping at the low-vectors virtual address.
4623 */
4624- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4625+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4626 map.virtual = 0xffff0000;
4627 map.length = PAGE_SIZE;
4628 #ifdef CONFIG_KUSER_HELPERS
4629@@ -1329,8 +1383,10 @@ static void __init kmap_init(void)
4630 static void __init map_lowmem(void)
4631 {
4632 struct memblock_region *reg;
4633+#ifndef CONFIG_PAX_KERNEXEC
4634 phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4635 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4636+#endif
4637
4638 /* Map all the lowmem memory banks. */
4639 for_each_memblock(memory, reg) {
4640@@ -1343,11 +1399,48 @@ static void __init map_lowmem(void)
4641 if (start >= end)
4642 break;
4643
4644+#ifdef CONFIG_PAX_KERNEXEC
4645+ map.pfn = __phys_to_pfn(start);
4646+ map.virtual = __phys_to_virt(start);
4647+ map.length = end - start;
4648+
4649+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4650+ struct map_desc kernel;
4651+ struct map_desc initmap;
4652+
4653+ /* when freeing initmem we will make this RW */
4654+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4655+ initmap.virtual = (unsigned long)__init_begin;
4656+ initmap.length = _sdata - __init_begin;
4657+ initmap.type = __MT_MEMORY_RWX;
4658+ create_mapping(&initmap);
4659+
4660+ /* when freeing initmem we will make this RX */
4661+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4662+ kernel.virtual = (unsigned long)_stext;
4663+ kernel.length = __init_begin - _stext;
4664+ kernel.type = __MT_MEMORY_RWX;
4665+ create_mapping(&kernel);
4666+
4667+ if (map.virtual < (unsigned long)_stext) {
4668+ map.length = (unsigned long)_stext - map.virtual;
4669+ map.type = __MT_MEMORY_RWX;
4670+ create_mapping(&map);
4671+ }
4672+
4673+ map.pfn = __phys_to_pfn(__pa(_sdata));
4674+ map.virtual = (unsigned long)_sdata;
4675+ map.length = end - __pa(_sdata);
4676+ }
4677+
4678+ map.type = MT_MEMORY_RW;
4679+ create_mapping(&map);
4680+#else
4681 if (end < kernel_x_start) {
4682 map.pfn = __phys_to_pfn(start);
4683 map.virtual = __phys_to_virt(start);
4684 map.length = end - start;
4685- map.type = MT_MEMORY_RWX;
4686+ map.type = __MT_MEMORY_RWX;
4687
4688 create_mapping(&map);
4689 } else if (start >= kernel_x_end) {
4690@@ -1371,7 +1464,7 @@ static void __init map_lowmem(void)
4691 map.pfn = __phys_to_pfn(kernel_x_start);
4692 map.virtual = __phys_to_virt(kernel_x_start);
4693 map.length = kernel_x_end - kernel_x_start;
4694- map.type = MT_MEMORY_RWX;
4695+ map.type = __MT_MEMORY_RWX;
4696
4697 create_mapping(&map);
4698
4699@@ -1384,6 +1477,7 @@ static void __init map_lowmem(void)
4700 create_mapping(&map);
4701 }
4702 }
4703+#endif
4704 }
4705 }
4706
4707diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
4708index f412b53..fc89433 100644
4709--- a/arch/arm/net/bpf_jit_32.c
4710+++ b/arch/arm/net/bpf_jit_32.c
4711@@ -20,6 +20,7 @@
4712 #include <asm/cacheflush.h>
4713 #include <asm/hwcap.h>
4714 #include <asm/opcodes.h>
4715+#include <asm/pgtable.h>
4716
4717 #include "bpf_jit_32.h"
4718
4719@@ -71,7 +72,11 @@ struct jit_ctx {
4720 #endif
4721 };
4722
4723+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
4724+int bpf_jit_enable __read_only;
4725+#else
4726 int bpf_jit_enable __read_mostly;
4727+#endif
4728
4729 static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
4730 {
4731@@ -178,8 +183,10 @@ static void jit_fill_hole(void *area, unsigned int size)
4732 {
4733 u32 *ptr;
4734 /* We are guaranteed to have aligned memory. */
4735+ pax_open_kernel();
4736 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
4737 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
4738+ pax_close_kernel();
4739 }
4740
4741 static void build_prologue(struct jit_ctx *ctx)
4742diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4743index 5b217f4..c23f40e 100644
4744--- a/arch/arm/plat-iop/setup.c
4745+++ b/arch/arm/plat-iop/setup.c
4746@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4747 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4748 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4749 .length = IOP3XX_PERIPHERAL_SIZE,
4750- .type = MT_UNCACHED,
4751+ .type = MT_UNCACHED_RW,
4752 },
4753 };
4754
4755diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4756index a5bc92d..0bb4730 100644
4757--- a/arch/arm/plat-omap/sram.c
4758+++ b/arch/arm/plat-omap/sram.c
4759@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4760 * Looks like we need to preserve some bootloader code at the
4761 * beginning of SRAM for jumping to flash for reboot to work...
4762 */
4763+ pax_open_kernel();
4764 memset_io(omap_sram_base + omap_sram_skip, 0,
4765 omap_sram_size - omap_sram_skip);
4766+ pax_close_kernel();
4767 }
4768diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
4769index 7047051..44e8675 100644
4770--- a/arch/arm64/include/asm/atomic.h
4771+++ b/arch/arm64/include/asm/atomic.h
4772@@ -252,5 +252,15 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
4773 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
4774 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
4775
4776+#define atomic64_read_unchecked(v) atomic64_read(v)
4777+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4778+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4779+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4780+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4781+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4782+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4783+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4784+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4785+
4786 #endif
4787 #endif
4788diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
4789index a5abb00..9cbca9a 100644
4790--- a/arch/arm64/include/asm/barrier.h
4791+++ b/arch/arm64/include/asm/barrier.h
4792@@ -44,7 +44,7 @@
4793 do { \
4794 compiletime_assert_atomic_type(*p); \
4795 barrier(); \
4796- ACCESS_ONCE(*p) = (v); \
4797+ ACCESS_ONCE_RW(*p) = (v); \
4798 } while (0)
4799
4800 #define smp_load_acquire(p) \
4801diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
4802index 4fde8c1..441f84f 100644
4803--- a/arch/arm64/include/asm/percpu.h
4804+++ b/arch/arm64/include/asm/percpu.h
4805@@ -135,16 +135,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
4806 {
4807 switch (size) {
4808 case 1:
4809- ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
4810+ ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
4811 break;
4812 case 2:
4813- ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
4814+ ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
4815 break;
4816 case 4:
4817- ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
4818+ ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
4819 break;
4820 case 8:
4821- ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
4822+ ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
4823 break;
4824 default:
4825 BUILD_BUG();
4826diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
4827index e20df38..027ede3 100644
4828--- a/arch/arm64/include/asm/pgalloc.h
4829+++ b/arch/arm64/include/asm/pgalloc.h
4830@@ -46,6 +46,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4831 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
4832 }
4833
4834+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4835+{
4836+ pud_populate(mm, pud, pmd);
4837+}
4838+
4839 #endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
4840
4841 #if CONFIG_ARM64_PGTABLE_LEVELS > 3
4842diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4843index 07e1ba44..ec8cbbb 100644
4844--- a/arch/arm64/include/asm/uaccess.h
4845+++ b/arch/arm64/include/asm/uaccess.h
4846@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4847 flag; \
4848 })
4849
4850+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4851 #define access_ok(type, addr, size) __range_ok(addr, size)
4852 #define user_addr_max get_fs
4853
4854diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
4855index b0bd4e5..54e82f6 100644
4856--- a/arch/arm64/mm/dma-mapping.c
4857+++ b/arch/arm64/mm/dma-mapping.c
4858@@ -134,7 +134,7 @@ static void __dma_free_coherent(struct device *dev, size_t size,
4859 phys_to_page(paddr),
4860 size >> PAGE_SHIFT);
4861 if (!freed)
4862- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
4863+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
4864 }
4865
4866 static void *__dma_alloc(struct device *dev, size_t size,
4867diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4868index c3a58a1..78fbf54 100644
4869--- a/arch/avr32/include/asm/cache.h
4870+++ b/arch/avr32/include/asm/cache.h
4871@@ -1,8 +1,10 @@
4872 #ifndef __ASM_AVR32_CACHE_H
4873 #define __ASM_AVR32_CACHE_H
4874
4875+#include <linux/const.h>
4876+
4877 #define L1_CACHE_SHIFT 5
4878-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4879+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4880
4881 /*
4882 * Memory returned by kmalloc() may be used for DMA, so we must make
4883diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4884index d232888..87c8df1 100644
4885--- a/arch/avr32/include/asm/elf.h
4886+++ b/arch/avr32/include/asm/elf.h
4887@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4888 the loader. We need to make sure that it is out of the way of the program
4889 that it will "exec", and that there is sufficient room for the brk. */
4890
4891-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4892+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4893
4894+#ifdef CONFIG_PAX_ASLR
4895+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4896+
4897+#define PAX_DELTA_MMAP_LEN 15
4898+#define PAX_DELTA_STACK_LEN 15
4899+#endif
4900
4901 /* This yields a mask that user programs can use to figure out what
4902 instruction set this CPU supports. This could be done in user space,
4903diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4904index 479330b..53717a8 100644
4905--- a/arch/avr32/include/asm/kmap_types.h
4906+++ b/arch/avr32/include/asm/kmap_types.h
4907@@ -2,9 +2,9 @@
4908 #define __ASM_AVR32_KMAP_TYPES_H
4909
4910 #ifdef CONFIG_DEBUG_HIGHMEM
4911-# define KM_TYPE_NR 29
4912+# define KM_TYPE_NR 30
4913 #else
4914-# define KM_TYPE_NR 14
4915+# define KM_TYPE_NR 15
4916 #endif
4917
4918 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4919diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4920index d223a8b..69c5210 100644
4921--- a/arch/avr32/mm/fault.c
4922+++ b/arch/avr32/mm/fault.c
4923@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4924
4925 int exception_trace = 1;
4926
4927+#ifdef CONFIG_PAX_PAGEEXEC
4928+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4929+{
4930+ unsigned long i;
4931+
4932+ printk(KERN_ERR "PAX: bytes at PC: ");
4933+ for (i = 0; i < 20; i++) {
4934+ unsigned char c;
4935+ if (get_user(c, (unsigned char *)pc+i))
4936+ printk(KERN_CONT "???????? ");
4937+ else
4938+ printk(KERN_CONT "%02x ", c);
4939+ }
4940+ printk("\n");
4941+}
4942+#endif
4943+
4944 /*
4945 * This routine handles page faults. It determines the address and the
4946 * problem, and then passes it off to one of the appropriate routines.
4947@@ -178,6 +195,16 @@ bad_area:
4948 up_read(&mm->mmap_sem);
4949
4950 if (user_mode(regs)) {
4951+
4952+#ifdef CONFIG_PAX_PAGEEXEC
4953+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4954+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4955+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4956+ do_group_exit(SIGKILL);
4957+ }
4958+ }
4959+#endif
4960+
4961 if (exception_trace && printk_ratelimit())
4962 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4963 "sp %08lx ecr %lu\n",
4964diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4965index 568885a..f8008df 100644
4966--- a/arch/blackfin/include/asm/cache.h
4967+++ b/arch/blackfin/include/asm/cache.h
4968@@ -7,6 +7,7 @@
4969 #ifndef __ARCH_BLACKFIN_CACHE_H
4970 #define __ARCH_BLACKFIN_CACHE_H
4971
4972+#include <linux/const.h>
4973 #include <linux/linkage.h> /* for asmlinkage */
4974
4975 /*
4976@@ -14,7 +15,7 @@
4977 * Blackfin loads 32 bytes for cache
4978 */
4979 #define L1_CACHE_SHIFT 5
4980-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4981+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4982 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4983
4984 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4985diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4986index aea2718..3639a60 100644
4987--- a/arch/cris/include/arch-v10/arch/cache.h
4988+++ b/arch/cris/include/arch-v10/arch/cache.h
4989@@ -1,8 +1,9 @@
4990 #ifndef _ASM_ARCH_CACHE_H
4991 #define _ASM_ARCH_CACHE_H
4992
4993+#include <linux/const.h>
4994 /* Etrax 100LX have 32-byte cache-lines. */
4995-#define L1_CACHE_BYTES 32
4996 #define L1_CACHE_SHIFT 5
4997+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4998
4999 #endif /* _ASM_ARCH_CACHE_H */
5000diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
5001index 7caf25d..ee65ac5 100644
5002--- a/arch/cris/include/arch-v32/arch/cache.h
5003+++ b/arch/cris/include/arch-v32/arch/cache.h
5004@@ -1,11 +1,12 @@
5005 #ifndef _ASM_CRIS_ARCH_CACHE_H
5006 #define _ASM_CRIS_ARCH_CACHE_H
5007
5008+#include <linux/const.h>
5009 #include <arch/hwregs/dma.h>
5010
5011 /* A cache-line is 32 bytes. */
5012-#define L1_CACHE_BYTES 32
5013 #define L1_CACHE_SHIFT 5
5014+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5015
5016 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5017
5018diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
5019index 102190a..5334cea 100644
5020--- a/arch/frv/include/asm/atomic.h
5021+++ b/arch/frv/include/asm/atomic.h
5022@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
5023 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
5024 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
5025
5026+#define atomic64_read_unchecked(v) atomic64_read(v)
5027+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5028+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5029+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5030+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5031+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5032+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5033+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5034+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5035+
5036 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5037 {
5038 int c, old;
5039diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5040index 2797163..c2a401df9 100644
5041--- a/arch/frv/include/asm/cache.h
5042+++ b/arch/frv/include/asm/cache.h
5043@@ -12,10 +12,11 @@
5044 #ifndef __ASM_CACHE_H
5045 #define __ASM_CACHE_H
5046
5047+#include <linux/const.h>
5048
5049 /* bytes per L1 cache line */
5050 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5051-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5052+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5053
5054 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5055 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5056diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5057index 43901f2..0d8b865 100644
5058--- a/arch/frv/include/asm/kmap_types.h
5059+++ b/arch/frv/include/asm/kmap_types.h
5060@@ -2,6 +2,6 @@
5061 #ifndef _ASM_KMAP_TYPES_H
5062 #define _ASM_KMAP_TYPES_H
5063
5064-#define KM_TYPE_NR 17
5065+#define KM_TYPE_NR 18
5066
5067 #endif
5068diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5069index 836f147..4cf23f5 100644
5070--- a/arch/frv/mm/elf-fdpic.c
5071+++ b/arch/frv/mm/elf-fdpic.c
5072@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5073 {
5074 struct vm_area_struct *vma;
5075 struct vm_unmapped_area_info info;
5076+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5077
5078 if (len > TASK_SIZE)
5079 return -ENOMEM;
5080@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5081 if (addr) {
5082 addr = PAGE_ALIGN(addr);
5083 vma = find_vma(current->mm, addr);
5084- if (TASK_SIZE - len >= addr &&
5085- (!vma || addr + len <= vma->vm_start))
5086+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5087 goto success;
5088 }
5089
5090@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5091 info.high_limit = (current->mm->start_stack - 0x00200000);
5092 info.align_mask = 0;
5093 info.align_offset = 0;
5094+ info.threadstack_offset = offset;
5095 addr = vm_unmapped_area(&info);
5096 if (!(addr & ~PAGE_MASK))
5097 goto success;
5098diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5099index 69952c18..4fa2908 100644
5100--- a/arch/hexagon/include/asm/cache.h
5101+++ b/arch/hexagon/include/asm/cache.h
5102@@ -21,9 +21,11 @@
5103 #ifndef __ASM_CACHE_H
5104 #define __ASM_CACHE_H
5105
5106+#include <linux/const.h>
5107+
5108 /* Bytes per L1 cache line */
5109-#define L1_CACHE_SHIFT (5)
5110-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5111+#define L1_CACHE_SHIFT 5
5112+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5113
5114 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5115
5116diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5117index 074e52b..76afdac 100644
5118--- a/arch/ia64/Kconfig
5119+++ b/arch/ia64/Kconfig
5120@@ -548,6 +548,7 @@ source "drivers/sn/Kconfig"
5121 config KEXEC
5122 bool "kexec system call"
5123 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5124+ depends on !GRKERNSEC_KMEM
5125 help
5126 kexec is a system call that implements the ability to shutdown your
5127 current kernel, and to start another kernel. It is like a reboot
5128diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5129index 970d0bd..e750b9b 100644
5130--- a/arch/ia64/Makefile
5131+++ b/arch/ia64/Makefile
5132@@ -98,5 +98,6 @@ endef
5133 archprepare: make_nr_irqs_h FORCE
5134 PHONY += make_nr_irqs_h FORCE
5135
5136+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5137 make_nr_irqs_h: FORCE
5138 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5139diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5140index 0bf0350..2ad1957 100644
5141--- a/arch/ia64/include/asm/atomic.h
5142+++ b/arch/ia64/include/asm/atomic.h
5143@@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5144 #define atomic64_inc(v) atomic64_add(1, (v))
5145 #define atomic64_dec(v) atomic64_sub(1, (v))
5146
5147+#define atomic64_read_unchecked(v) atomic64_read(v)
5148+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5149+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5150+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5151+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5152+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5153+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5154+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5155+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5156+
5157 #endif /* _ASM_IA64_ATOMIC_H */
5158diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5159index f6769eb..1cdb590 100644
5160--- a/arch/ia64/include/asm/barrier.h
5161+++ b/arch/ia64/include/asm/barrier.h
5162@@ -66,7 +66,7 @@
5163 do { \
5164 compiletime_assert_atomic_type(*p); \
5165 barrier(); \
5166- ACCESS_ONCE(*p) = (v); \
5167+ ACCESS_ONCE_RW(*p) = (v); \
5168 } while (0)
5169
5170 #define smp_load_acquire(p) \
5171diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5172index 988254a..e1ee885 100644
5173--- a/arch/ia64/include/asm/cache.h
5174+++ b/arch/ia64/include/asm/cache.h
5175@@ -1,6 +1,7 @@
5176 #ifndef _ASM_IA64_CACHE_H
5177 #define _ASM_IA64_CACHE_H
5178
5179+#include <linux/const.h>
5180
5181 /*
5182 * Copyright (C) 1998-2000 Hewlett-Packard Co
5183@@ -9,7 +10,7 @@
5184
5185 /* Bytes per L1 (data) cache line. */
5186 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5187-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5188+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5189
5190 #ifdef CONFIG_SMP
5191 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5192diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5193index 5a83c5c..4d7f553 100644
5194--- a/arch/ia64/include/asm/elf.h
5195+++ b/arch/ia64/include/asm/elf.h
5196@@ -42,6 +42,13 @@
5197 */
5198 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5199
5200+#ifdef CONFIG_PAX_ASLR
5201+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5202+
5203+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5204+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5205+#endif
5206+
5207 #define PT_IA_64_UNWIND 0x70000001
5208
5209 /* IA-64 relocations: */
5210diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5211index 5767cdf..7462574 100644
5212--- a/arch/ia64/include/asm/pgalloc.h
5213+++ b/arch/ia64/include/asm/pgalloc.h
5214@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5215 pgd_val(*pgd_entry) = __pa(pud);
5216 }
5217
5218+static inline void
5219+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5220+{
5221+ pgd_populate(mm, pgd_entry, pud);
5222+}
5223+
5224 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5225 {
5226 return quicklist_alloc(0, GFP_KERNEL, NULL);
5227@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5228 pud_val(*pud_entry) = __pa(pmd);
5229 }
5230
5231+static inline void
5232+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5233+{
5234+ pud_populate(mm, pud_entry, pmd);
5235+}
5236+
5237 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5238 {
5239 return quicklist_alloc(0, GFP_KERNEL, NULL);
5240diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5241index 7b6f880..ac8e008 100644
5242--- a/arch/ia64/include/asm/pgtable.h
5243+++ b/arch/ia64/include/asm/pgtable.h
5244@@ -12,7 +12,7 @@
5245 * David Mosberger-Tang <davidm@hpl.hp.com>
5246 */
5247
5248-
5249+#include <linux/const.h>
5250 #include <asm/mman.h>
5251 #include <asm/page.h>
5252 #include <asm/processor.h>
5253@@ -139,6 +139,17 @@
5254 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5255 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5256 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5257+
5258+#ifdef CONFIG_PAX_PAGEEXEC
5259+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5260+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5261+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5262+#else
5263+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5264+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5265+# define PAGE_COPY_NOEXEC PAGE_COPY
5266+#endif
5267+
5268 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5269 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5270 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5271diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5272index 45698cd..e8e2dbc 100644
5273--- a/arch/ia64/include/asm/spinlock.h
5274+++ b/arch/ia64/include/asm/spinlock.h
5275@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5276 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5277
5278 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5279- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5280+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5281 }
5282
5283 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5284diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5285index 4f3fb6cc..254055e 100644
5286--- a/arch/ia64/include/asm/uaccess.h
5287+++ b/arch/ia64/include/asm/uaccess.h
5288@@ -70,6 +70,7 @@
5289 && ((segment).seg == KERNEL_DS.seg \
5290 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5291 })
5292+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5293 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5294
5295 /*
5296@@ -241,12 +242,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5297 static inline unsigned long
5298 __copy_to_user (void __user *to, const void *from, unsigned long count)
5299 {
5300+ if (count > INT_MAX)
5301+ return count;
5302+
5303+ if (!__builtin_constant_p(count))
5304+ check_object_size(from, count, true);
5305+
5306 return __copy_user(to, (__force void __user *) from, count);
5307 }
5308
5309 static inline unsigned long
5310 __copy_from_user (void *to, const void __user *from, unsigned long count)
5311 {
5312+ if (count > INT_MAX)
5313+ return count;
5314+
5315+ if (!__builtin_constant_p(count))
5316+ check_object_size(to, count, false);
5317+
5318 return __copy_user((__force void __user *) to, from, count);
5319 }
5320
5321@@ -256,10 +269,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5322 ({ \
5323 void __user *__cu_to = (to); \
5324 const void *__cu_from = (from); \
5325- long __cu_len = (n); \
5326+ unsigned long __cu_len = (n); \
5327 \
5328- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5329+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5330+ if (!__builtin_constant_p(n)) \
5331+ check_object_size(__cu_from, __cu_len, true); \
5332 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5333+ } \
5334 __cu_len; \
5335 })
5336
5337@@ -267,11 +283,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5338 ({ \
5339 void *__cu_to = (to); \
5340 const void __user *__cu_from = (from); \
5341- long __cu_len = (n); \
5342+ unsigned long __cu_len = (n); \
5343 \
5344 __chk_user_ptr(__cu_from); \
5345- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5346+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5347+ if (!__builtin_constant_p(n)) \
5348+ check_object_size(__cu_to, __cu_len, false); \
5349 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5350+ } \
5351 __cu_len; \
5352 })
5353
5354diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5355index 29754aa..06d2838 100644
5356--- a/arch/ia64/kernel/module.c
5357+++ b/arch/ia64/kernel/module.c
5358@@ -492,15 +492,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5359 }
5360
5361 static inline int
5362+in_init_rx (const struct module *mod, uint64_t addr)
5363+{
5364+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5365+}
5366+
5367+static inline int
5368+in_init_rw (const struct module *mod, uint64_t addr)
5369+{
5370+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5371+}
5372+
5373+static inline int
5374 in_init (const struct module *mod, uint64_t addr)
5375 {
5376- return addr - (uint64_t) mod->module_init < mod->init_size;
5377+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5378+}
5379+
5380+static inline int
5381+in_core_rx (const struct module *mod, uint64_t addr)
5382+{
5383+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5384+}
5385+
5386+static inline int
5387+in_core_rw (const struct module *mod, uint64_t addr)
5388+{
5389+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5390 }
5391
5392 static inline int
5393 in_core (const struct module *mod, uint64_t addr)
5394 {
5395- return addr - (uint64_t) mod->module_core < mod->core_size;
5396+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5397 }
5398
5399 static inline int
5400@@ -683,7 +707,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5401 break;
5402
5403 case RV_BDREL:
5404- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5405+ if (in_init_rx(mod, val))
5406+ val -= (uint64_t) mod->module_init_rx;
5407+ else if (in_init_rw(mod, val))
5408+ val -= (uint64_t) mod->module_init_rw;
5409+ else if (in_core_rx(mod, val))
5410+ val -= (uint64_t) mod->module_core_rx;
5411+ else if (in_core_rw(mod, val))
5412+ val -= (uint64_t) mod->module_core_rw;
5413 break;
5414
5415 case RV_LTV:
5416@@ -818,15 +849,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5417 * addresses have been selected...
5418 */
5419 uint64_t gp;
5420- if (mod->core_size > MAX_LTOFF)
5421+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5422 /*
5423 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5424 * at the end of the module.
5425 */
5426- gp = mod->core_size - MAX_LTOFF / 2;
5427+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5428 else
5429- gp = mod->core_size / 2;
5430- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5431+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5432+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5433 mod->arch.gp = gp;
5434 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5435 }
5436diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5437index c39c3cd..3c77738 100644
5438--- a/arch/ia64/kernel/palinfo.c
5439+++ b/arch/ia64/kernel/palinfo.c
5440@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5441 return NOTIFY_OK;
5442 }
5443
5444-static struct notifier_block __refdata palinfo_cpu_notifier =
5445+static struct notifier_block palinfo_cpu_notifier =
5446 {
5447 .notifier_call = palinfo_cpu_callback,
5448 .priority = 0,
5449diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5450index 41e33f8..65180b2a 100644
5451--- a/arch/ia64/kernel/sys_ia64.c
5452+++ b/arch/ia64/kernel/sys_ia64.c
5453@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5454 unsigned long align_mask = 0;
5455 struct mm_struct *mm = current->mm;
5456 struct vm_unmapped_area_info info;
5457+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5458
5459 if (len > RGN_MAP_LIMIT)
5460 return -ENOMEM;
5461@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5462 if (REGION_NUMBER(addr) == RGN_HPAGE)
5463 addr = 0;
5464 #endif
5465+
5466+#ifdef CONFIG_PAX_RANDMMAP
5467+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5468+ addr = mm->free_area_cache;
5469+ else
5470+#endif
5471+
5472 if (!addr)
5473 addr = TASK_UNMAPPED_BASE;
5474
5475@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5476 info.high_limit = TASK_SIZE;
5477 info.align_mask = align_mask;
5478 info.align_offset = 0;
5479+ info.threadstack_offset = offset;
5480 return vm_unmapped_area(&info);
5481 }
5482
5483diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5484index 84f8a52..7c76178 100644
5485--- a/arch/ia64/kernel/vmlinux.lds.S
5486+++ b/arch/ia64/kernel/vmlinux.lds.S
5487@@ -192,7 +192,7 @@ SECTIONS {
5488 /* Per-cpu data: */
5489 . = ALIGN(PERCPU_PAGE_SIZE);
5490 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5491- __phys_per_cpu_start = __per_cpu_load;
5492+ __phys_per_cpu_start = per_cpu_load;
5493 /*
5494 * ensure percpu data fits
5495 * into percpu page size
5496diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5497index ba5ba7a..36e9d3a 100644
5498--- a/arch/ia64/mm/fault.c
5499+++ b/arch/ia64/mm/fault.c
5500@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5501 return pte_present(pte);
5502 }
5503
5504+#ifdef CONFIG_PAX_PAGEEXEC
5505+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5506+{
5507+ unsigned long i;
5508+
5509+ printk(KERN_ERR "PAX: bytes at PC: ");
5510+ for (i = 0; i < 8; i++) {
5511+ unsigned int c;
5512+ if (get_user(c, (unsigned int *)pc+i))
5513+ printk(KERN_CONT "???????? ");
5514+ else
5515+ printk(KERN_CONT "%08x ", c);
5516+ }
5517+ printk("\n");
5518+}
5519+#endif
5520+
5521 # define VM_READ_BIT 0
5522 # define VM_WRITE_BIT 1
5523 # define VM_EXEC_BIT 2
5524@@ -151,8 +168,21 @@ retry:
5525 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5526 goto bad_area;
5527
5528- if ((vma->vm_flags & mask) != mask)
5529+ if ((vma->vm_flags & mask) != mask) {
5530+
5531+#ifdef CONFIG_PAX_PAGEEXEC
5532+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5533+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5534+ goto bad_area;
5535+
5536+ up_read(&mm->mmap_sem);
5537+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5538+ do_group_exit(SIGKILL);
5539+ }
5540+#endif
5541+
5542 goto bad_area;
5543+ }
5544
5545 /*
5546 * If for any reason at all we couldn't handle the fault, make
5547diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5548index 52b7604b..455cb85 100644
5549--- a/arch/ia64/mm/hugetlbpage.c
5550+++ b/arch/ia64/mm/hugetlbpage.c
5551@@ -143,6 +143,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5552 unsigned long pgoff, unsigned long flags)
5553 {
5554 struct vm_unmapped_area_info info;
5555+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5556
5557 if (len > RGN_MAP_LIMIT)
5558 return -ENOMEM;
5559@@ -166,6 +167,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5560 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5561 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5562 info.align_offset = 0;
5563+ info.threadstack_offset = offset;
5564 return vm_unmapped_area(&info);
5565 }
5566
5567diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5568index 6b33457..88b5124 100644
5569--- a/arch/ia64/mm/init.c
5570+++ b/arch/ia64/mm/init.c
5571@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5572 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5573 vma->vm_end = vma->vm_start + PAGE_SIZE;
5574 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5575+
5576+#ifdef CONFIG_PAX_PAGEEXEC
5577+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5578+ vma->vm_flags &= ~VM_EXEC;
5579+
5580+#ifdef CONFIG_PAX_MPROTECT
5581+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5582+ vma->vm_flags &= ~VM_MAYEXEC;
5583+#endif
5584+
5585+ }
5586+#endif
5587+
5588 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5589 down_write(&current->mm->mmap_sem);
5590 if (insert_vm_struct(current->mm, vma)) {
5591@@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
5592 gate_vma.vm_start = FIXADDR_USER_START;
5593 gate_vma.vm_end = FIXADDR_USER_END;
5594 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
5595- gate_vma.vm_page_prot = __P101;
5596+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
5597
5598 return 0;
5599 }
5600diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5601index 40b3ee98..8c2c112 100644
5602--- a/arch/m32r/include/asm/cache.h
5603+++ b/arch/m32r/include/asm/cache.h
5604@@ -1,8 +1,10 @@
5605 #ifndef _ASM_M32R_CACHE_H
5606 #define _ASM_M32R_CACHE_H
5607
5608+#include <linux/const.h>
5609+
5610 /* L1 cache line size */
5611 #define L1_CACHE_SHIFT 4
5612-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5613+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5614
5615 #endif /* _ASM_M32R_CACHE_H */
5616diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5617index 82abd15..d95ae5d 100644
5618--- a/arch/m32r/lib/usercopy.c
5619+++ b/arch/m32r/lib/usercopy.c
5620@@ -14,6 +14,9 @@
5621 unsigned long
5622 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5623 {
5624+ if ((long)n < 0)
5625+ return n;
5626+
5627 prefetch(from);
5628 if (access_ok(VERIFY_WRITE, to, n))
5629 __copy_user(to,from,n);
5630@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5631 unsigned long
5632 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5633 {
5634+ if ((long)n < 0)
5635+ return n;
5636+
5637 prefetchw(to);
5638 if (access_ok(VERIFY_READ, from, n))
5639 __copy_user_zeroing(to,from,n);
5640diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5641index 0395c51..5f26031 100644
5642--- a/arch/m68k/include/asm/cache.h
5643+++ b/arch/m68k/include/asm/cache.h
5644@@ -4,9 +4,11 @@
5645 #ifndef __ARCH_M68K_CACHE_H
5646 #define __ARCH_M68K_CACHE_H
5647
5648+#include <linux/const.h>
5649+
5650 /* bytes per L1 cache line */
5651 #define L1_CACHE_SHIFT 4
5652-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5653+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5654
5655 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5656
5657diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5658index d703d8e..a8e2d70 100644
5659--- a/arch/metag/include/asm/barrier.h
5660+++ b/arch/metag/include/asm/barrier.h
5661@@ -90,7 +90,7 @@ static inline void fence(void)
5662 do { \
5663 compiletime_assert_atomic_type(*p); \
5664 smp_mb(); \
5665- ACCESS_ONCE(*p) = (v); \
5666+ ACCESS_ONCE_RW(*p) = (v); \
5667 } while (0)
5668
5669 #define smp_load_acquire(p) \
5670diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5671index 7ca80ac..794ba72 100644
5672--- a/arch/metag/mm/hugetlbpage.c
5673+++ b/arch/metag/mm/hugetlbpage.c
5674@@ -194,6 +194,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5675 info.high_limit = TASK_SIZE;
5676 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5677 info.align_offset = 0;
5678+ info.threadstack_offset = 0;
5679 return vm_unmapped_area(&info);
5680 }
5681
5682diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5683index 4efe96a..60e8699 100644
5684--- a/arch/microblaze/include/asm/cache.h
5685+++ b/arch/microblaze/include/asm/cache.h
5686@@ -13,11 +13,12 @@
5687 #ifndef _ASM_MICROBLAZE_CACHE_H
5688 #define _ASM_MICROBLAZE_CACHE_H
5689
5690+#include <linux/const.h>
5691 #include <asm/registers.h>
5692
5693 #define L1_CACHE_SHIFT 5
5694 /* word-granular cache in microblaze */
5695-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5696+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5697
5698 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5699
5700diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5701index 1a313c4..f27b613 100644
5702--- a/arch/mips/Kconfig
5703+++ b/arch/mips/Kconfig
5704@@ -2504,6 +2504,7 @@ source "kernel/Kconfig.preempt"
5705
5706 config KEXEC
5707 bool "Kexec system call"
5708+ depends on !GRKERNSEC_KMEM
5709 help
5710 kexec is a system call that implements the ability to shutdown your
5711 current kernel, and to start another kernel. It is like a reboot
5712diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5713index d8960d4..77dbd31 100644
5714--- a/arch/mips/cavium-octeon/dma-octeon.c
5715+++ b/arch/mips/cavium-octeon/dma-octeon.c
5716@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5717 if (dma_release_from_coherent(dev, order, vaddr))
5718 return;
5719
5720- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5721+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5722 }
5723
5724 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5725diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5726index 26d4363..3c9a82e 100644
5727--- a/arch/mips/include/asm/atomic.h
5728+++ b/arch/mips/include/asm/atomic.h
5729@@ -22,15 +22,39 @@
5730 #include <asm/cmpxchg.h>
5731 #include <asm/war.h>
5732
5733+#ifdef CONFIG_GENERIC_ATOMIC64
5734+#include <asm-generic/atomic64.h>
5735+#endif
5736+
5737 #define ATOMIC_INIT(i) { (i) }
5738
5739+#ifdef CONFIG_64BIT
5740+#define _ASM_EXTABLE(from, to) \
5741+" .section __ex_table,\"a\"\n" \
5742+" .dword " #from ", " #to"\n" \
5743+" .previous\n"
5744+#else
5745+#define _ASM_EXTABLE(from, to) \
5746+" .section __ex_table,\"a\"\n" \
5747+" .word " #from ", " #to"\n" \
5748+" .previous\n"
5749+#endif
5750+
5751 /*
5752 * atomic_read - read atomic variable
5753 * @v: pointer of type atomic_t
5754 *
5755 * Atomically reads the value of @v.
5756 */
5757-#define atomic_read(v) ACCESS_ONCE((v)->counter)
5758+static inline int atomic_read(const atomic_t *v)
5759+{
5760+ return ACCESS_ONCE(v->counter);
5761+}
5762+
5763+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5764+{
5765+ return ACCESS_ONCE(v->counter);
5766+}
5767
5768 /*
5769 * atomic_set - set atomic variable
5770@@ -39,47 +63,77 @@
5771 *
5772 * Atomically sets the value of @v to @i.
5773 */
5774-#define atomic_set(v, i) ((v)->counter = (i))
5775+static inline void atomic_set(atomic_t *v, int i)
5776+{
5777+ v->counter = i;
5778+}
5779
5780-#define ATOMIC_OP(op, c_op, asm_op) \
5781-static __inline__ void atomic_##op(int i, atomic_t * v) \
5782+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5783+{
5784+ v->counter = i;
5785+}
5786+
5787+#ifdef CONFIG_PAX_REFCOUNT
5788+#define __OVERFLOW_POST \
5789+ " b 4f \n" \
5790+ " .set noreorder \n" \
5791+ "3: b 5f \n" \
5792+ " move %0, %1 \n" \
5793+ " .set reorder \n"
5794+#define __OVERFLOW_EXTABLE \
5795+ "3:\n" \
5796+ _ASM_EXTABLE(2b, 3b)
5797+#else
5798+#define __OVERFLOW_POST
5799+#define __OVERFLOW_EXTABLE
5800+#endif
5801+
5802+#define __ATOMIC_OP(op, suffix, asm_op, extable) \
5803+static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \
5804 { \
5805 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
5806 int temp; \
5807 \
5808 __asm__ __volatile__( \
5809- " .set arch=r4000 \n" \
5810- "1: ll %0, %1 # atomic_" #op " \n" \
5811- " " #asm_op " %0, %2 \n" \
5812+ " .set mips3 \n" \
5813+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5814+ "2: " #asm_op " %0, %2 \n" \
5815 " sc %0, %1 \n" \
5816 " beqzl %0, 1b \n" \
5817+ extable \
5818 " .set mips0 \n" \
5819 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5820 : "Ir" (i)); \
5821 } else if (kernel_uses_llsc) { \
5822 int temp; \
5823 \
5824- do { \
5825- __asm__ __volatile__( \
5826- " .set "MIPS_ISA_LEVEL" \n" \
5827- " ll %0, %1 # atomic_" #op "\n" \
5828- " " #asm_op " %0, %2 \n" \
5829- " sc %0, %1 \n" \
5830- " .set mips0 \n" \
5831- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5832- : "Ir" (i)); \
5833- } while (unlikely(!temp)); \
5834+ __asm__ __volatile__( \
5835+ " .set "MIPS_ISA_LEVEL" \n" \
5836+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5837+ "2: " #asm_op " %0, %2 \n" \
5838+ " sc %0, %1 \n" \
5839+ " beqz %0, 1b \n" \
5840+ extable \
5841+ " .set mips0 \n" \
5842+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5843+ : "Ir" (i)); \
5844 } else { \
5845 unsigned long flags; \
5846 \
5847 raw_local_irq_save(flags); \
5848- v->counter c_op i; \
5849+ __asm__ __volatile__( \
5850+ "2: " #asm_op " %0, %1 \n" \
5851+ extable \
5852+ : "+r" (v->counter) : "Ir" (i)); \
5853 raw_local_irq_restore(flags); \
5854 } \
5855 }
5856
5857-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
5858-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5859+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, _unchecked, asm_op##u, ) \
5860+ __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE)
5861+
5862+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \
5863+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t * v) \
5864 { \
5865 int result; \
5866 \
5867@@ -89,12 +143,15 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5868 int temp; \
5869 \
5870 __asm__ __volatile__( \
5871- " .set arch=r4000 \n" \
5872- "1: ll %1, %2 # atomic_" #op "_return \n" \
5873- " " #asm_op " %0, %1, %3 \n" \
5874+ " .set mips3 \n" \
5875+ "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \
5876+ "2: " #asm_op " %0, %1, %3 \n" \
5877 " sc %0, %2 \n" \
5878 " beqzl %0, 1b \n" \
5879- " " #asm_op " %0, %1, %3 \n" \
5880+ post_op \
5881+ extable \
5882+ "4: " #asm_op " %0, %1, %3 \n" \
5883+ "5: \n" \
5884 " .set mips0 \n" \
5885 : "=&r" (result), "=&r" (temp), \
5886 "+" GCC_OFF_SMALL_ASM() (v->counter) \
5887@@ -102,26 +159,33 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5888 } else if (kernel_uses_llsc) { \
5889 int temp; \
5890 \
5891- do { \
5892- __asm__ __volatile__( \
5893- " .set "MIPS_ISA_LEVEL" \n" \
5894- " ll %1, %2 # atomic_" #op "_return \n" \
5895- " " #asm_op " %0, %1, %3 \n" \
5896- " sc %0, %2 \n" \
5897- " .set mips0 \n" \
5898- : "=&r" (result), "=&r" (temp), \
5899- "+" GCC_OFF_SMALL_ASM() (v->counter) \
5900- : "Ir" (i)); \
5901- } while (unlikely(!result)); \
5902+ __asm__ __volatile__( \
5903+ " .set "MIPS_ISA_LEVEL" \n" \
5904+ "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \
5905+ "2: " #asm_op " %0, %1, %3 \n" \
5906+ " sc %0, %2 \n" \
5907+ post_op \
5908+ extable \
5909+ "4: " #asm_op " %0, %1, %3 \n" \
5910+ "5: \n" \
5911+ " .set mips0 \n" \
5912+ : "=&r" (result), "=&r" (temp), \
5913+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
5914+ : "Ir" (i)); \
5915 \
5916 result = temp; result c_op i; \
5917 } else { \
5918 unsigned long flags; \
5919 \
5920 raw_local_irq_save(flags); \
5921- result = v->counter; \
5922- result c_op i; \
5923- v->counter = result; \
5924+ __asm__ __volatile__( \
5925+ " lw %0, %1 \n" \
5926+ "2: " #asm_op " %0, %1, %2 \n" \
5927+ " sw %0, %1 \n" \
5928+ "3: \n" \
5929+ extable \
5930+ : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
5931+ : "Ir" (i)); \
5932 raw_local_irq_restore(flags); \
5933 } \
5934 \
5935@@ -130,16 +194,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5936 return result; \
5937 }
5938
5939-#define ATOMIC_OPS(op, c_op, asm_op) \
5940- ATOMIC_OP(op, c_op, asm_op) \
5941- ATOMIC_OP_RETURN(op, c_op, asm_op)
5942+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , ) \
5943+ __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
5944
5945-ATOMIC_OPS(add, +=, addu)
5946-ATOMIC_OPS(sub, -=, subu)
5947+#define ATOMIC_OPS(op, asm_op) \
5948+ ATOMIC_OP(op, asm_op) \
5949+ ATOMIC_OP_RETURN(op, asm_op)
5950+
5951+ATOMIC_OPS(add, add)
5952+ATOMIC_OPS(sub, sub)
5953
5954 #undef ATOMIC_OPS
5955 #undef ATOMIC_OP_RETURN
5956+#undef __ATOMIC_OP_RETURN
5957 #undef ATOMIC_OP
5958+#undef __ATOMIC_OP
5959
5960 /*
5961 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
5962@@ -149,7 +218,7 @@ ATOMIC_OPS(sub, -=, subu)
5963 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5964 * The function returns the old value of @v minus @i.
5965 */
5966-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5967+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5968 {
5969 int result;
5970
5971@@ -159,7 +228,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5972 int temp;
5973
5974 __asm__ __volatile__(
5975- " .set arch=r4000 \n"
5976+ " .set "MIPS_ISA_LEVEL" \n"
5977 "1: ll %1, %2 # atomic_sub_if_positive\n"
5978 " subu %0, %1, %3 \n"
5979 " bltz %0, 1f \n"
5980@@ -208,8 +277,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5981 return result;
5982 }
5983
5984-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5985-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
5986+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
5987+{
5988+ return cmpxchg(&v->counter, old, new);
5989+}
5990+
5991+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
5992+ int new)
5993+{
5994+ return cmpxchg(&(v->counter), old, new);
5995+}
5996+
5997+static inline int atomic_xchg(atomic_t *v, int new)
5998+{
5999+ return xchg(&v->counter, new);
6000+}
6001+
6002+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6003+{
6004+ return xchg(&(v->counter), new);
6005+}
6006
6007 /**
6008 * __atomic_add_unless - add unless the number is a given value
6009@@ -237,6 +324,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6010
6011 #define atomic_dec_return(v) atomic_sub_return(1, (v))
6012 #define atomic_inc_return(v) atomic_add_return(1, (v))
6013+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6014+{
6015+ return atomic_add_return_unchecked(1, v);
6016+}
6017
6018 /*
6019 * atomic_sub_and_test - subtract value from variable and test result
6020@@ -258,6 +349,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6021 * other cases.
6022 */
6023 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6024+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6025+{
6026+ return atomic_add_return_unchecked(1, v) == 0;
6027+}
6028
6029 /*
6030 * atomic_dec_and_test - decrement by 1 and test
6031@@ -282,6 +377,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6032 * Atomically increments @v by 1.
6033 */
6034 #define atomic_inc(v) atomic_add(1, (v))
6035+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6036+{
6037+ atomic_add_unchecked(1, v);
6038+}
6039
6040 /*
6041 * atomic_dec - decrement and test
6042@@ -290,6 +389,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6043 * Atomically decrements @v by 1.
6044 */
6045 #define atomic_dec(v) atomic_sub(1, (v))
6046+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6047+{
6048+ atomic_sub_unchecked(1, v);
6049+}
6050
6051 /*
6052 * atomic_add_negative - add and test if negative
6053@@ -311,54 +414,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6054 * @v: pointer of type atomic64_t
6055 *
6056 */
6057-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
6058+static inline long atomic64_read(const atomic64_t *v)
6059+{
6060+ return ACCESS_ONCE(v->counter);
6061+}
6062+
6063+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6064+{
6065+ return ACCESS_ONCE(v->counter);
6066+}
6067
6068 /*
6069 * atomic64_set - set atomic variable
6070 * @v: pointer of type atomic64_t
6071 * @i: required value
6072 */
6073-#define atomic64_set(v, i) ((v)->counter = (i))
6074+static inline void atomic64_set(atomic64_t *v, long i)
6075+{
6076+ v->counter = i;
6077+}
6078
6079-#define ATOMIC64_OP(op, c_op, asm_op) \
6080-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
6081+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6082+{
6083+ v->counter = i;
6084+}
6085+
6086+#define __ATOMIC64_OP(op, suffix, asm_op, extable) \
6087+static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \
6088 { \
6089 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
6090 long temp; \
6091 \
6092 __asm__ __volatile__( \
6093- " .set arch=r4000 \n" \
6094- "1: lld %0, %1 # atomic64_" #op " \n" \
6095- " " #asm_op " %0, %2 \n" \
6096+ " .set "MIPS_ISA_LEVEL" \n" \
6097+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6098+ "2: " #asm_op " %0, %2 \n" \
6099 " scd %0, %1 \n" \
6100 " beqzl %0, 1b \n" \
6101+ extable \
6102 " .set mips0 \n" \
6103 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6104 : "Ir" (i)); \
6105 } else if (kernel_uses_llsc) { \
6106 long temp; \
6107 \
6108- do { \
6109- __asm__ __volatile__( \
6110- " .set "MIPS_ISA_LEVEL" \n" \
6111- " lld %0, %1 # atomic64_" #op "\n" \
6112- " " #asm_op " %0, %2 \n" \
6113- " scd %0, %1 \n" \
6114- " .set mips0 \n" \
6115- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6116- : "Ir" (i)); \
6117- } while (unlikely(!temp)); \
6118+ __asm__ __volatile__( \
6119+ " .set "MIPS_ISA_LEVEL" \n" \
6120+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6121+ "2: " #asm_op " %0, %2 \n" \
6122+ " scd %0, %1 \n" \
6123+ " beqz %0, 1b \n" \
6124+ extable \
6125+ " .set mips0 \n" \
6126+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6127+ : "Ir" (i)); \
6128 } else { \
6129 unsigned long flags; \
6130 \
6131 raw_local_irq_save(flags); \
6132- v->counter c_op i; \
6133+ __asm__ __volatile__( \
6134+ "2: " #asm_op " %0, %1 \n" \
6135+ extable \
6136+ : "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i)); \
6137 raw_local_irq_restore(flags); \
6138 } \
6139 }
6140
6141-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
6142-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6143+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, _unchecked, asm_op##u, ) \
6144+ __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE)
6145+
6146+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \
6147+static inline long atomic64_##op##_return##suffix(long i, atomic64##suffix##_t * v)\
6148 { \
6149 long result; \
6150 \
6151@@ -368,12 +494,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6152 long temp; \
6153 \
6154 __asm__ __volatile__( \
6155- " .set arch=r4000 \n" \
6156+ " .set mips3 \n" \
6157 "1: lld %1, %2 # atomic64_" #op "_return\n" \
6158- " " #asm_op " %0, %1, %3 \n" \
6159+ "2: " #asm_op " %0, %1, %3 \n" \
6160 " scd %0, %2 \n" \
6161 " beqzl %0, 1b \n" \
6162- " " #asm_op " %0, %1, %3 \n" \
6163+ post_op \
6164+ extable \
6165+ "4: " #asm_op " %0, %1, %3 \n" \
6166+ "5: \n" \
6167 " .set mips0 \n" \
6168 : "=&r" (result), "=&r" (temp), \
6169 "+" GCC_OFF_SMALL_ASM() (v->counter) \
6170@@ -381,27 +510,35 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6171 } else if (kernel_uses_llsc) { \
6172 long temp; \
6173 \
6174- do { \
6175- __asm__ __volatile__( \
6176- " .set "MIPS_ISA_LEVEL" \n" \
6177- " lld %1, %2 # atomic64_" #op "_return\n" \
6178- " " #asm_op " %0, %1, %3 \n" \
6179- " scd %0, %2 \n" \
6180- " .set mips0 \n" \
6181- : "=&r" (result), "=&r" (temp), \
6182- "=" GCC_OFF_SMALL_ASM() (v->counter) \
6183- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
6184- : "memory"); \
6185- } while (unlikely(!result)); \
6186+ __asm__ __volatile__( \
6187+ " .set "MIPS_ISA_LEVEL" \n" \
6188+ "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\
6189+ "2: " #asm_op " %0, %1, %3 \n" \
6190+ " scd %0, %2 \n" \
6191+ " beqz %0, 1b \n" \
6192+ post_op \
6193+ extable \
6194+ "4: " #asm_op " %0, %1, %3 \n" \
6195+ "5: \n" \
6196+ " .set mips0 \n" \
6197+ : "=&r" (result), "=&r" (temp), \
6198+ "=" GCC_OFF_SMALL_ASM() (v->counter) \
6199+ : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
6200+ : "memory"); \
6201 \
6202 result = temp; result c_op i; \
6203 } else { \
6204 unsigned long flags; \
6205 \
6206 raw_local_irq_save(flags); \
6207- result = v->counter; \
6208- result c_op i; \
6209- v->counter = result; \
6210+ __asm__ __volatile__( \
6211+ " ld %0, %1 \n" \
6212+ "2: " #asm_op " %0, %1, %2 \n" \
6213+ " sd %0, %1 \n" \
6214+ "3: \n" \
6215+ extable \
6216+ : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6217+ : "Ir" (i)); \
6218 raw_local_irq_restore(flags); \
6219 } \
6220 \
6221@@ -410,16 +547,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6222 return result; \
6223 }
6224
6225-#define ATOMIC64_OPS(op, c_op, asm_op) \
6226- ATOMIC64_OP(op, c_op, asm_op) \
6227- ATOMIC64_OP_RETURN(op, c_op, asm_op)
6228+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , ) \
6229+ __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
6230
6231-ATOMIC64_OPS(add, +=, daddu)
6232-ATOMIC64_OPS(sub, -=, dsubu)
6233+#define ATOMIC64_OPS(op, asm_op) \
6234+ ATOMIC64_OP(op, asm_op) \
6235+ ATOMIC64_OP_RETURN(op, asm_op)
6236+
6237+ATOMIC64_OPS(add, dadd)
6238+ATOMIC64_OPS(sub, dsub)
6239
6240 #undef ATOMIC64_OPS
6241 #undef ATOMIC64_OP_RETURN
6242+#undef __ATOMIC64_OP_RETURN
6243 #undef ATOMIC64_OP
6244+#undef __ATOMIC64_OP
6245+#undef __OVERFLOW_EXTABLE
6246+#undef __OVERFLOW_POST
6247
6248 /*
6249 * atomic64_sub_if_positive - conditionally subtract integer from atomic
6250@@ -430,7 +574,7 @@ ATOMIC64_OPS(sub, -=, dsubu)
6251 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6252 * The function returns the old value of @v minus @i.
6253 */
6254-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6255+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6256 {
6257 long result;
6258
6259@@ -440,7 +584,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6260 long temp;
6261
6262 __asm__ __volatile__(
6263- " .set arch=r4000 \n"
6264+ " .set "MIPS_ISA_LEVEL" \n"
6265 "1: lld %1, %2 # atomic64_sub_if_positive\n"
6266 " dsubu %0, %1, %3 \n"
6267 " bltz %0, 1f \n"
6268@@ -489,9 +633,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6269 return result;
6270 }
6271
6272-#define atomic64_cmpxchg(v, o, n) \
6273- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6274-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6275+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6276+{
6277+ return cmpxchg(&v->counter, old, new);
6278+}
6279+
6280+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6281+ long new)
6282+{
6283+ return cmpxchg(&(v->counter), old, new);
6284+}
6285+
6286+static inline long atomic64_xchg(atomic64_t *v, long new)
6287+{
6288+ return xchg(&v->counter, new);
6289+}
6290+
6291+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6292+{
6293+ return xchg(&(v->counter), new);
6294+}
6295
6296 /**
6297 * atomic64_add_unless - add unless the number is a given value
6298@@ -521,6 +682,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6299
6300 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6301 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6302+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6303
6304 /*
6305 * atomic64_sub_and_test - subtract value from variable and test result
6306@@ -542,6 +704,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6307 * other cases.
6308 */
6309 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6310+#define atomic64_inc_and_test_unchecked(v) atomic64_add_return_unchecked(1, (v)) == 0)
6311
6312 /*
6313 * atomic64_dec_and_test - decrement by 1 and test
6314@@ -566,6 +729,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6315 * Atomically increments @v by 1.
6316 */
6317 #define atomic64_inc(v) atomic64_add(1, (v))
6318+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6319
6320 /*
6321 * atomic64_dec - decrement and test
6322@@ -574,6 +738,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6323 * Atomically decrements @v by 1.
6324 */
6325 #define atomic64_dec(v) atomic64_sub(1, (v))
6326+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6327
6328 /*
6329 * atomic64_add_negative - add and test if negative
6330diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6331index 2b8bbbc..4556df6 100644
6332--- a/arch/mips/include/asm/barrier.h
6333+++ b/arch/mips/include/asm/barrier.h
6334@@ -133,7 +133,7 @@
6335 do { \
6336 compiletime_assert_atomic_type(*p); \
6337 smp_mb(); \
6338- ACCESS_ONCE(*p) = (v); \
6339+ ACCESS_ONCE_RW(*p) = (v); \
6340 } while (0)
6341
6342 #define smp_load_acquire(p) \
6343diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6344index b4db69f..8f3b093 100644
6345--- a/arch/mips/include/asm/cache.h
6346+++ b/arch/mips/include/asm/cache.h
6347@@ -9,10 +9,11 @@
6348 #ifndef _ASM_CACHE_H
6349 #define _ASM_CACHE_H
6350
6351+#include <linux/const.h>
6352 #include <kmalloc.h>
6353
6354 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6355-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6356+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6357
6358 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6359 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6360diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6361index 694925a..990fa62 100644
6362--- a/arch/mips/include/asm/elf.h
6363+++ b/arch/mips/include/asm/elf.h
6364@@ -410,15 +410,18 @@ extern const char *__elf_platform;
6365 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6366 #endif
6367
6368+#ifdef CONFIG_PAX_ASLR
6369+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6370+
6371+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6372+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6373+#endif
6374+
6375 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6376 struct linux_binprm;
6377 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6378 int uses_interp);
6379
6380-struct mm_struct;
6381-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6382-#define arch_randomize_brk arch_randomize_brk
6383-
6384 struct arch_elf_state {
6385 int fp_abi;
6386 int interp_fp_abi;
6387diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6388index c1f6afa..38cc6e9 100644
6389--- a/arch/mips/include/asm/exec.h
6390+++ b/arch/mips/include/asm/exec.h
6391@@ -12,6 +12,6 @@
6392 #ifndef _ASM_EXEC_H
6393 #define _ASM_EXEC_H
6394
6395-extern unsigned long arch_align_stack(unsigned long sp);
6396+#define arch_align_stack(x) ((x) & ~0xfUL)
6397
6398 #endif /* _ASM_EXEC_H */
6399diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6400index 9e8ef59..1139d6b 100644
6401--- a/arch/mips/include/asm/hw_irq.h
6402+++ b/arch/mips/include/asm/hw_irq.h
6403@@ -10,7 +10,7 @@
6404
6405 #include <linux/atomic.h>
6406
6407-extern atomic_t irq_err_count;
6408+extern atomic_unchecked_t irq_err_count;
6409
6410 /*
6411 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6412diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6413index 8feaed6..1bd8a64 100644
6414--- a/arch/mips/include/asm/local.h
6415+++ b/arch/mips/include/asm/local.h
6416@@ -13,15 +13,25 @@ typedef struct
6417 atomic_long_t a;
6418 } local_t;
6419
6420+typedef struct {
6421+ atomic_long_unchecked_t a;
6422+} local_unchecked_t;
6423+
6424 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6425
6426 #define local_read(l) atomic_long_read(&(l)->a)
6427+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6428 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6429+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6430
6431 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6432+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6433 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6434+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6435 #define local_inc(l) atomic_long_inc(&(l)->a)
6436+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6437 #define local_dec(l) atomic_long_dec(&(l)->a)
6438+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6439
6440 /*
6441 * Same as above, but return the result value
6442@@ -71,6 +81,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6443 return result;
6444 }
6445
6446+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6447+{
6448+ unsigned long result;
6449+
6450+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6451+ unsigned long temp;
6452+
6453+ __asm__ __volatile__(
6454+ " .set mips3 \n"
6455+ "1:" __LL "%1, %2 # local_add_return \n"
6456+ " addu %0, %1, %3 \n"
6457+ __SC "%0, %2 \n"
6458+ " beqzl %0, 1b \n"
6459+ " addu %0, %1, %3 \n"
6460+ " .set mips0 \n"
6461+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6462+ : "Ir" (i), "m" (l->a.counter)
6463+ : "memory");
6464+ } else if (kernel_uses_llsc) {
6465+ unsigned long temp;
6466+
6467+ __asm__ __volatile__(
6468+ " .set mips3 \n"
6469+ "1:" __LL "%1, %2 # local_add_return \n"
6470+ " addu %0, %1, %3 \n"
6471+ __SC "%0, %2 \n"
6472+ " beqz %0, 1b \n"
6473+ " addu %0, %1, %3 \n"
6474+ " .set mips0 \n"
6475+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6476+ : "Ir" (i), "m" (l->a.counter)
6477+ : "memory");
6478+ } else {
6479+ unsigned long flags;
6480+
6481+ local_irq_save(flags);
6482+ result = l->a.counter;
6483+ result += i;
6484+ l->a.counter = result;
6485+ local_irq_restore(flags);
6486+ }
6487+
6488+ return result;
6489+}
6490+
6491 static __inline__ long local_sub_return(long i, local_t * l)
6492 {
6493 unsigned long result;
6494@@ -118,6 +173,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6495
6496 #define local_cmpxchg(l, o, n) \
6497 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6498+#define local_cmpxchg_unchecked(l, o, n) \
6499+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6500 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6501
6502 /**
6503diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6504index 154b70a..426ae3d 100644
6505--- a/arch/mips/include/asm/page.h
6506+++ b/arch/mips/include/asm/page.h
6507@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6508 #ifdef CONFIG_CPU_MIPS32
6509 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6510 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6511- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6512+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6513 #else
6514 typedef struct { unsigned long long pte; } pte_t;
6515 #define pte_val(x) ((x).pte)
6516diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6517index b336037..5b874cc 100644
6518--- a/arch/mips/include/asm/pgalloc.h
6519+++ b/arch/mips/include/asm/pgalloc.h
6520@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6521 {
6522 set_pud(pud, __pud((unsigned long)pmd));
6523 }
6524+
6525+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6526+{
6527+ pud_populate(mm, pud, pmd);
6528+}
6529 #endif
6530
6531 /*
6532diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6533index f8f809f..b5f3fa4 100644
6534--- a/arch/mips/include/asm/pgtable.h
6535+++ b/arch/mips/include/asm/pgtable.h
6536@@ -20,6 +20,9 @@
6537 #include <asm/io.h>
6538 #include <asm/pgtable-bits.h>
6539
6540+#define ktla_ktva(addr) (addr)
6541+#define ktva_ktla(addr) (addr)
6542+
6543 struct mm_struct;
6544 struct vm_area_struct;
6545
6546diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6547index 55ed660..3dc9422 100644
6548--- a/arch/mips/include/asm/thread_info.h
6549+++ b/arch/mips/include/asm/thread_info.h
6550@@ -102,6 +102,9 @@ static inline struct thread_info *current_thread_info(void)
6551 #define TIF_SECCOMP 4 /* secure computing */
6552 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
6553 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
6554+/* li takes a 32bit immediate */
6555+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
6556+
6557 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
6558 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6559 #define TIF_NOHZ 19 /* in adaptive nohz mode */
6560@@ -137,14 +140,16 @@ static inline struct thread_info *current_thread_info(void)
6561 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
6562 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
6563 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6564+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6565
6566 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6567 _TIF_SYSCALL_AUDIT | \
6568- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6569+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
6570+ _TIF_GRSEC_SETXID)
6571
6572 /* work to do in syscall_trace_leave() */
6573 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6574- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6575+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6576
6577 /* work to do on interrupt/exception return */
6578 #define _TIF_WORK_MASK \
6579@@ -152,7 +157,7 @@ static inline struct thread_info *current_thread_info(void)
6580 /* work to do on any return to u-space */
6581 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6582 _TIF_WORK_SYSCALL_EXIT | \
6583- _TIF_SYSCALL_TRACEPOINT)
6584+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6585
6586 /*
6587 * We stash processor id into a COP0 register to retrieve it fast
6588diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6589index bf8b324..cec5705 100644
6590--- a/arch/mips/include/asm/uaccess.h
6591+++ b/arch/mips/include/asm/uaccess.h
6592@@ -130,6 +130,7 @@ extern u64 __ua_limit;
6593 __ok == 0; \
6594 })
6595
6596+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6597 #define access_ok(type, addr, size) \
6598 likely(__access_ok((addr), (size), __access_mask))
6599
6600diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6601index 1188e00..41cf144 100644
6602--- a/arch/mips/kernel/binfmt_elfn32.c
6603+++ b/arch/mips/kernel/binfmt_elfn32.c
6604@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6605 #undef ELF_ET_DYN_BASE
6606 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6607
6608+#ifdef CONFIG_PAX_ASLR
6609+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6610+
6611+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6612+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6613+#endif
6614+
6615 #include <asm/processor.h>
6616 #include <linux/module.h>
6617 #include <linux/elfcore.h>
6618diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6619index 9287678..f870e47 100644
6620--- a/arch/mips/kernel/binfmt_elfo32.c
6621+++ b/arch/mips/kernel/binfmt_elfo32.c
6622@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6623 #undef ELF_ET_DYN_BASE
6624 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6625
6626+#ifdef CONFIG_PAX_ASLR
6627+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6628+
6629+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6630+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6631+#endif
6632+
6633 #include <asm/processor.h>
6634
6635 #include <linux/module.h>
6636diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
6637index a74ec3a..4f06f18 100644
6638--- a/arch/mips/kernel/i8259.c
6639+++ b/arch/mips/kernel/i8259.c
6640@@ -202,7 +202,7 @@ spurious_8259A_irq:
6641 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
6642 spurious_irq_mask |= irqmask;
6643 }
6644- atomic_inc(&irq_err_count);
6645+ atomic_inc_unchecked(&irq_err_count);
6646 /*
6647 * Theoretically we do not have to handle this IRQ,
6648 * but in Linux this does not cause problems and is
6649diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
6650index 44a1f79..2bd6aa3 100644
6651--- a/arch/mips/kernel/irq-gt641xx.c
6652+++ b/arch/mips/kernel/irq-gt641xx.c
6653@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
6654 }
6655 }
6656
6657- atomic_inc(&irq_err_count);
6658+ atomic_inc_unchecked(&irq_err_count);
6659 }
6660
6661 void __init gt641xx_irq_init(void)
6662diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6663index be15e52..a089cc4 100644
6664--- a/arch/mips/kernel/irq.c
6665+++ b/arch/mips/kernel/irq.c
6666@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
6667 printk("unexpected IRQ # %d\n", irq);
6668 }
6669
6670-atomic_t irq_err_count;
6671+atomic_unchecked_t irq_err_count;
6672
6673 int arch_show_interrupts(struct seq_file *p, int prec)
6674 {
6675- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6676+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6677 return 0;
6678 }
6679
6680 asmlinkage void spurious_interrupt(void)
6681 {
6682- atomic_inc(&irq_err_count);
6683+ atomic_inc_unchecked(&irq_err_count);
6684 }
6685
6686 void __init init_IRQ(void)
6687@@ -110,6 +110,8 @@ void __init init_IRQ(void)
6688 }
6689
6690 #ifdef CONFIG_DEBUG_STACKOVERFLOW
6691+
6692+extern void gr_handle_kernel_exploit(void);
6693 static inline void check_stack_overflow(void)
6694 {
6695 unsigned long sp;
6696@@ -125,6 +127,7 @@ static inline void check_stack_overflow(void)
6697 printk("do_IRQ: stack overflow: %ld\n",
6698 sp - sizeof(struct thread_info));
6699 dump_stack();
6700+ gr_handle_kernel_exploit();
6701 }
6702 }
6703 #else
6704diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
6705index 0614717..002fa43 100644
6706--- a/arch/mips/kernel/pm-cps.c
6707+++ b/arch/mips/kernel/pm-cps.c
6708@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
6709 nc_core_ready_count = nc_addr;
6710
6711 /* Ensure ready_count is zero-initialised before the assembly runs */
6712- ACCESS_ONCE(*nc_core_ready_count) = 0;
6713+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
6714 coupled_barrier(&per_cpu(pm_barrier, core), online);
6715
6716 /* Run the generated entry code */
6717diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6718index bf85cc1..b365c61 100644
6719--- a/arch/mips/kernel/process.c
6720+++ b/arch/mips/kernel/process.c
6721@@ -535,18 +535,6 @@ out:
6722 return pc;
6723 }
6724
6725-/*
6726- * Don't forget that the stack pointer must be aligned on a 8 bytes
6727- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6728- */
6729-unsigned long arch_align_stack(unsigned long sp)
6730-{
6731- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6732- sp -= get_random_int() & ~PAGE_MASK;
6733-
6734- return sp & ALMASK;
6735-}
6736-
6737 static void arch_dump_stack(void *info)
6738 {
6739 struct pt_regs *regs;
6740diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6741index 5104528..950bbdc 100644
6742--- a/arch/mips/kernel/ptrace.c
6743+++ b/arch/mips/kernel/ptrace.c
6744@@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request,
6745 return ret;
6746 }
6747
6748+#ifdef CONFIG_GRKERNSEC_SETXID
6749+extern void gr_delayed_cred_worker(void);
6750+#endif
6751+
6752 /*
6753 * Notification of system call entry/exit
6754 * - triggered by current->work.syscall_trace
6755@@ -779,6 +783,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
6756 tracehook_report_syscall_entry(regs))
6757 ret = -1;
6758
6759+#ifdef CONFIG_GRKERNSEC_SETXID
6760+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6761+ gr_delayed_cred_worker();
6762+#endif
6763+
6764 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6765 trace_sys_enter(regs, regs->regs[2]);
6766
6767diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
6768index 07fc524..b9d7f28 100644
6769--- a/arch/mips/kernel/reset.c
6770+++ b/arch/mips/kernel/reset.c
6771@@ -13,6 +13,7 @@
6772 #include <linux/reboot.h>
6773
6774 #include <asm/reboot.h>
6775+#include <asm/bug.h>
6776
6777 /*
6778 * Urgs ... Too many MIPS machines to handle this in a generic way.
6779@@ -29,16 +30,19 @@ void machine_restart(char *command)
6780 {
6781 if (_machine_restart)
6782 _machine_restart(command);
6783+ BUG();
6784 }
6785
6786 void machine_halt(void)
6787 {
6788 if (_machine_halt)
6789 _machine_halt();
6790+ BUG();
6791 }
6792
6793 void machine_power_off(void)
6794 {
6795 if (pm_power_off)
6796 pm_power_off();
6797+ BUG();
6798 }
6799diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
6800index 2242bdd..b284048 100644
6801--- a/arch/mips/kernel/sync-r4k.c
6802+++ b/arch/mips/kernel/sync-r4k.c
6803@@ -18,8 +18,8 @@
6804 #include <asm/mipsregs.h>
6805
6806 static atomic_t count_start_flag = ATOMIC_INIT(0);
6807-static atomic_t count_count_start = ATOMIC_INIT(0);
6808-static atomic_t count_count_stop = ATOMIC_INIT(0);
6809+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
6810+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
6811 static atomic_t count_reference = ATOMIC_INIT(0);
6812
6813 #define COUNTON 100
6814@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
6815
6816 for (i = 0; i < NR_LOOPS; i++) {
6817 /* slaves loop on '!= 2' */
6818- while (atomic_read(&count_count_start) != 1)
6819+ while (atomic_read_unchecked(&count_count_start) != 1)
6820 mb();
6821- atomic_set(&count_count_stop, 0);
6822+ atomic_set_unchecked(&count_count_stop, 0);
6823 smp_wmb();
6824
6825 /* this lets the slaves write their count register */
6826- atomic_inc(&count_count_start);
6827+ atomic_inc_unchecked(&count_count_start);
6828
6829 /*
6830 * Everyone initialises count in the last loop:
6831@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
6832 /*
6833 * Wait for all slaves to leave the synchronization point:
6834 */
6835- while (atomic_read(&count_count_stop) != 1)
6836+ while (atomic_read_unchecked(&count_count_stop) != 1)
6837 mb();
6838- atomic_set(&count_count_start, 0);
6839+ atomic_set_unchecked(&count_count_start, 0);
6840 smp_wmb();
6841- atomic_inc(&count_count_stop);
6842+ atomic_inc_unchecked(&count_count_stop);
6843 }
6844 /* Arrange for an interrupt in a short while */
6845 write_c0_compare(read_c0_count() + COUNTON);
6846@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
6847 initcount = atomic_read(&count_reference);
6848
6849 for (i = 0; i < NR_LOOPS; i++) {
6850- atomic_inc(&count_count_start);
6851- while (atomic_read(&count_count_start) != 2)
6852+ atomic_inc_unchecked(&count_count_start);
6853+ while (atomic_read_unchecked(&count_count_start) != 2)
6854 mb();
6855
6856 /*
6857@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
6858 if (i == NR_LOOPS-1)
6859 write_c0_count(initcount);
6860
6861- atomic_inc(&count_count_stop);
6862- while (atomic_read(&count_count_stop) != 2)
6863+ atomic_inc_unchecked(&count_count_stop);
6864+ while (atomic_read_unchecked(&count_count_stop) != 2)
6865 mb();
6866 }
6867 /* Arrange for an interrupt in a short while */
6868diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6869index 33984c0..666a96d 100644
6870--- a/arch/mips/kernel/traps.c
6871+++ b/arch/mips/kernel/traps.c
6872@@ -689,7 +689,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
6873 siginfo_t info;
6874
6875 prev_state = exception_enter();
6876- die_if_kernel("Integer overflow", regs);
6877+ if (unlikely(!user_mode(regs))) {
6878+
6879+#ifdef CONFIG_PAX_REFCOUNT
6880+ if (fixup_exception(regs)) {
6881+ pax_report_refcount_overflow(regs);
6882+ exception_exit(prev_state);
6883+ return;
6884+ }
6885+#endif
6886+
6887+ die("Integer overflow", regs);
6888+ }
6889
6890 info.si_code = FPE_INTOVF;
6891 info.si_signo = SIGFPE;
6892diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
6893index adf3886..ce8f002 100644
6894--- a/arch/mips/kvm/mips.c
6895+++ b/arch/mips/kvm/mips.c
6896@@ -816,7 +816,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
6897 return r;
6898 }
6899
6900-int kvm_arch_init(void *opaque)
6901+int kvm_arch_init(const void *opaque)
6902 {
6903 if (kvm_mips_callbacks) {
6904 kvm_err("kvm: module already exists\n");
6905diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6906index 7ff8637..6004edb 100644
6907--- a/arch/mips/mm/fault.c
6908+++ b/arch/mips/mm/fault.c
6909@@ -31,6 +31,23 @@
6910
6911 int show_unhandled_signals = 1;
6912
6913+#ifdef CONFIG_PAX_PAGEEXEC
6914+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6915+{
6916+ unsigned long i;
6917+
6918+ printk(KERN_ERR "PAX: bytes at PC: ");
6919+ for (i = 0; i < 5; i++) {
6920+ unsigned int c;
6921+ if (get_user(c, (unsigned int *)pc+i))
6922+ printk(KERN_CONT "???????? ");
6923+ else
6924+ printk(KERN_CONT "%08x ", c);
6925+ }
6926+ printk("\n");
6927+}
6928+#endif
6929+
6930 /*
6931 * This routine handles page faults. It determines the address,
6932 * and the problem, and then passes it off to one of the appropriate
6933@@ -206,6 +223,14 @@ bad_area:
6934 bad_area_nosemaphore:
6935 /* User mode accesses just cause a SIGSEGV */
6936 if (user_mode(regs)) {
6937+
6938+#ifdef CONFIG_PAX_PAGEEXEC
6939+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
6940+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
6941+ do_group_exit(SIGKILL);
6942+ }
6943+#endif
6944+
6945 tsk->thread.cp0_badvaddr = address;
6946 tsk->thread.error_code = write;
6947 if (show_unhandled_signals &&
6948diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
6949index f1baadd..5472dca 100644
6950--- a/arch/mips/mm/mmap.c
6951+++ b/arch/mips/mm/mmap.c
6952@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6953 struct vm_area_struct *vma;
6954 unsigned long addr = addr0;
6955 int do_color_align;
6956+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6957 struct vm_unmapped_area_info info;
6958
6959 if (unlikely(len > TASK_SIZE))
6960@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6961 do_color_align = 1;
6962
6963 /* requesting a specific address */
6964+
6965+#ifdef CONFIG_PAX_RANDMMAP
6966+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
6967+#endif
6968+
6969 if (addr) {
6970 if (do_color_align)
6971 addr = COLOUR_ALIGN(addr, pgoff);
6972@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6973 addr = PAGE_ALIGN(addr);
6974
6975 vma = find_vma(mm, addr);
6976- if (TASK_SIZE - len >= addr &&
6977- (!vma || addr + len <= vma->vm_start))
6978+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6979 return addr;
6980 }
6981
6982 info.length = len;
6983 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
6984 info.align_offset = pgoff << PAGE_SHIFT;
6985+ info.threadstack_offset = offset;
6986
6987 if (dir == DOWN) {
6988 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
6989@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6990 {
6991 unsigned long random_factor = 0UL;
6992
6993+#ifdef CONFIG_PAX_RANDMMAP
6994+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6995+#endif
6996+
6997 if (current->flags & PF_RANDOMIZE) {
6998 random_factor = get_random_int();
6999 random_factor = random_factor << PAGE_SHIFT;
7000@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7001
7002 if (mmap_is_legacy()) {
7003 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7004+
7005+#ifdef CONFIG_PAX_RANDMMAP
7006+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7007+ mm->mmap_base += mm->delta_mmap;
7008+#endif
7009+
7010 mm->get_unmapped_area = arch_get_unmapped_area;
7011 } else {
7012 mm->mmap_base = mmap_base(random_factor);
7013+
7014+#ifdef CONFIG_PAX_RANDMMAP
7015+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7016+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7017+#endif
7018+
7019 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7020 }
7021 }
7022
7023-static inline unsigned long brk_rnd(void)
7024-{
7025- unsigned long rnd = get_random_int();
7026-
7027- rnd = rnd << PAGE_SHIFT;
7028- /* 8MB for 32bit, 256MB for 64bit */
7029- if (TASK_IS_32BIT_ADDR)
7030- rnd = rnd & 0x7ffffful;
7031- else
7032- rnd = rnd & 0xffffffful;
7033-
7034- return rnd;
7035-}
7036-
7037-unsigned long arch_randomize_brk(struct mm_struct *mm)
7038-{
7039- unsigned long base = mm->brk;
7040- unsigned long ret;
7041-
7042- ret = PAGE_ALIGN(base + brk_rnd());
7043-
7044- if (ret < mm->brk)
7045- return mm->brk;
7046-
7047- return ret;
7048-}
7049-
7050 int __virt_addr_valid(const volatile void *kaddr)
7051 {
7052 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
7053diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7054index a2358b4..7cead4f 100644
7055--- a/arch/mips/sgi-ip27/ip27-nmi.c
7056+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7057@@ -187,9 +187,9 @@ void
7058 cont_nmi_dump(void)
7059 {
7060 #ifndef REAL_NMI_SIGNAL
7061- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7062+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7063
7064- atomic_inc(&nmied_cpus);
7065+ atomic_inc_unchecked(&nmied_cpus);
7066 #endif
7067 /*
7068 * Only allow 1 cpu to proceed
7069@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7070 udelay(10000);
7071 }
7072 #else
7073- while (atomic_read(&nmied_cpus) != num_online_cpus());
7074+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7075 #endif
7076
7077 /*
7078diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7079index a046b30..6799527 100644
7080--- a/arch/mips/sni/rm200.c
7081+++ b/arch/mips/sni/rm200.c
7082@@ -270,7 +270,7 @@ spurious_8259A_irq:
7083 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7084 spurious_irq_mask |= irqmask;
7085 }
7086- atomic_inc(&irq_err_count);
7087+ atomic_inc_unchecked(&irq_err_count);
7088 /*
7089 * Theoretically we do not have to handle this IRQ,
7090 * but in Linux this does not cause problems and is
7091diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7092index 41e873b..34d33a7 100644
7093--- a/arch/mips/vr41xx/common/icu.c
7094+++ b/arch/mips/vr41xx/common/icu.c
7095@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7096
7097 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7098
7099- atomic_inc(&irq_err_count);
7100+ atomic_inc_unchecked(&irq_err_count);
7101
7102 return -1;
7103 }
7104diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7105index ae0e4ee..e8f0692 100644
7106--- a/arch/mips/vr41xx/common/irq.c
7107+++ b/arch/mips/vr41xx/common/irq.c
7108@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7109 irq_cascade_t *cascade;
7110
7111 if (irq >= NR_IRQS) {
7112- atomic_inc(&irq_err_count);
7113+ atomic_inc_unchecked(&irq_err_count);
7114 return;
7115 }
7116
7117@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7118 ret = cascade->get_irq(irq);
7119 irq = ret;
7120 if (ret < 0)
7121- atomic_inc(&irq_err_count);
7122+ atomic_inc_unchecked(&irq_err_count);
7123 else
7124 irq_dispatch(irq);
7125 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7126diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7127index 967d144..db12197 100644
7128--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7129+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7130@@ -11,12 +11,14 @@
7131 #ifndef _ASM_PROC_CACHE_H
7132 #define _ASM_PROC_CACHE_H
7133
7134+#include <linux/const.h>
7135+
7136 /* L1 cache */
7137
7138 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7139 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7140-#define L1_CACHE_BYTES 16 /* bytes per entry */
7141 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7142+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7143 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7144
7145 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7146diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7147index bcb5df2..84fabd2 100644
7148--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7149+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7150@@ -16,13 +16,15 @@
7151 #ifndef _ASM_PROC_CACHE_H
7152 #define _ASM_PROC_CACHE_H
7153
7154+#include <linux/const.h>
7155+
7156 /*
7157 * L1 cache
7158 */
7159 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7160 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7161-#define L1_CACHE_BYTES 32 /* bytes per entry */
7162 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7163+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7164 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7165
7166 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7167diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7168index 4ce7a01..449202a 100644
7169--- a/arch/openrisc/include/asm/cache.h
7170+++ b/arch/openrisc/include/asm/cache.h
7171@@ -19,11 +19,13 @@
7172 #ifndef __ASM_OPENRISC_CACHE_H
7173 #define __ASM_OPENRISC_CACHE_H
7174
7175+#include <linux/const.h>
7176+
7177 /* FIXME: How can we replace these with values from the CPU...
7178 * they shouldn't be hard-coded!
7179 */
7180
7181-#define L1_CACHE_BYTES 16
7182 #define L1_CACHE_SHIFT 4
7183+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7184
7185 #endif /* __ASM_OPENRISC_CACHE_H */
7186diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7187index 226f8ca9..9d9b87d 100644
7188--- a/arch/parisc/include/asm/atomic.h
7189+++ b/arch/parisc/include/asm/atomic.h
7190@@ -273,6 +273,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7191 return dec;
7192 }
7193
7194+#define atomic64_read_unchecked(v) atomic64_read(v)
7195+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7196+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7197+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7198+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7199+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7200+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7201+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7202+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7203+
7204 #endif /* !CONFIG_64BIT */
7205
7206
7207diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7208index 47f11c7..3420df2 100644
7209--- a/arch/parisc/include/asm/cache.h
7210+++ b/arch/parisc/include/asm/cache.h
7211@@ -5,6 +5,7 @@
7212 #ifndef __ARCH_PARISC_CACHE_H
7213 #define __ARCH_PARISC_CACHE_H
7214
7215+#include <linux/const.h>
7216
7217 /*
7218 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7219@@ -15,13 +16,13 @@
7220 * just ruin performance.
7221 */
7222 #ifdef CONFIG_PA20
7223-#define L1_CACHE_BYTES 64
7224 #define L1_CACHE_SHIFT 6
7225 #else
7226-#define L1_CACHE_BYTES 32
7227 #define L1_CACHE_SHIFT 5
7228 #endif
7229
7230+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7231+
7232 #ifndef __ASSEMBLY__
7233
7234 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7235diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7236index 78c9fd3..42fa66a 100644
7237--- a/arch/parisc/include/asm/elf.h
7238+++ b/arch/parisc/include/asm/elf.h
7239@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7240
7241 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7242
7243+#ifdef CONFIG_PAX_ASLR
7244+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7245+
7246+#define PAX_DELTA_MMAP_LEN 16
7247+#define PAX_DELTA_STACK_LEN 16
7248+#endif
7249+
7250 /* This yields a mask that user programs can use to figure out what
7251 instruction set this CPU supports. This could be done in user space,
7252 but it's not easy, and we've already done it here. */
7253diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7254index d174372..f27fe5c 100644
7255--- a/arch/parisc/include/asm/pgalloc.h
7256+++ b/arch/parisc/include/asm/pgalloc.h
7257@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7258 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7259 }
7260
7261+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7262+{
7263+ pgd_populate(mm, pgd, pmd);
7264+}
7265+
7266 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7267 {
7268 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7269@@ -96,6 +101,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7270 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7271 #define pmd_free(mm, x) do { } while (0)
7272 #define pgd_populate(mm, pmd, pte) BUG()
7273+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7274
7275 #endif
7276
7277diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7278index 15207b9..3209e65 100644
7279--- a/arch/parisc/include/asm/pgtable.h
7280+++ b/arch/parisc/include/asm/pgtable.h
7281@@ -215,6 +215,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7282 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7283 #define PAGE_COPY PAGE_EXECREAD
7284 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7285+
7286+#ifdef CONFIG_PAX_PAGEEXEC
7287+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7288+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7289+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7290+#else
7291+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7292+# define PAGE_COPY_NOEXEC PAGE_COPY
7293+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7294+#endif
7295+
7296 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7297 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7298 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7299diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7300index 0abdd4c..1af92f0 100644
7301--- a/arch/parisc/include/asm/uaccess.h
7302+++ b/arch/parisc/include/asm/uaccess.h
7303@@ -243,10 +243,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7304 const void __user *from,
7305 unsigned long n)
7306 {
7307- int sz = __compiletime_object_size(to);
7308+ size_t sz = __compiletime_object_size(to);
7309 int ret = -EFAULT;
7310
7311- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7312+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7313 ret = __copy_from_user(to, from, n);
7314 else
7315 copy_from_user_overflow();
7316diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7317index 3c63a82..b1d6ee9 100644
7318--- a/arch/parisc/kernel/module.c
7319+++ b/arch/parisc/kernel/module.c
7320@@ -98,16 +98,38 @@
7321
7322 /* three functions to determine where in the module core
7323 * or init pieces the location is */
7324+static inline int in_init_rx(struct module *me, void *loc)
7325+{
7326+ return (loc >= me->module_init_rx &&
7327+ loc < (me->module_init_rx + me->init_size_rx));
7328+}
7329+
7330+static inline int in_init_rw(struct module *me, void *loc)
7331+{
7332+ return (loc >= me->module_init_rw &&
7333+ loc < (me->module_init_rw + me->init_size_rw));
7334+}
7335+
7336 static inline int in_init(struct module *me, void *loc)
7337 {
7338- return (loc >= me->module_init &&
7339- loc <= (me->module_init + me->init_size));
7340+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7341+}
7342+
7343+static inline int in_core_rx(struct module *me, void *loc)
7344+{
7345+ return (loc >= me->module_core_rx &&
7346+ loc < (me->module_core_rx + me->core_size_rx));
7347+}
7348+
7349+static inline int in_core_rw(struct module *me, void *loc)
7350+{
7351+ return (loc >= me->module_core_rw &&
7352+ loc < (me->module_core_rw + me->core_size_rw));
7353 }
7354
7355 static inline int in_core(struct module *me, void *loc)
7356 {
7357- return (loc >= me->module_core &&
7358- loc <= (me->module_core + me->core_size));
7359+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7360 }
7361
7362 static inline int in_local(struct module *me, void *loc)
7363@@ -367,13 +389,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7364 }
7365
7366 /* align things a bit */
7367- me->core_size = ALIGN(me->core_size, 16);
7368- me->arch.got_offset = me->core_size;
7369- me->core_size += gots * sizeof(struct got_entry);
7370+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7371+ me->arch.got_offset = me->core_size_rw;
7372+ me->core_size_rw += gots * sizeof(struct got_entry);
7373
7374- me->core_size = ALIGN(me->core_size, 16);
7375- me->arch.fdesc_offset = me->core_size;
7376- me->core_size += fdescs * sizeof(Elf_Fdesc);
7377+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7378+ me->arch.fdesc_offset = me->core_size_rw;
7379+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7380
7381 me->arch.got_max = gots;
7382 me->arch.fdesc_max = fdescs;
7383@@ -391,7 +413,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7384
7385 BUG_ON(value == 0);
7386
7387- got = me->module_core + me->arch.got_offset;
7388+ got = me->module_core_rw + me->arch.got_offset;
7389 for (i = 0; got[i].addr; i++)
7390 if (got[i].addr == value)
7391 goto out;
7392@@ -409,7 +431,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7393 #ifdef CONFIG_64BIT
7394 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7395 {
7396- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7397+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7398
7399 if (!value) {
7400 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7401@@ -427,7 +449,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7402
7403 /* Create new one */
7404 fdesc->addr = value;
7405- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7406+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7407 return (Elf_Addr)fdesc;
7408 }
7409 #endif /* CONFIG_64BIT */
7410@@ -839,7 +861,7 @@ register_unwind_table(struct module *me,
7411
7412 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7413 end = table + sechdrs[me->arch.unwind_section].sh_size;
7414- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7415+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7416
7417 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7418 me->arch.unwind_section, table, end, gp);
7419diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7420index 5aba01a..47cdd5a 100644
7421--- a/arch/parisc/kernel/sys_parisc.c
7422+++ b/arch/parisc/kernel/sys_parisc.c
7423@@ -92,6 +92,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7424 unsigned long task_size = TASK_SIZE;
7425 int do_color_align, last_mmap;
7426 struct vm_unmapped_area_info info;
7427+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7428
7429 if (len > task_size)
7430 return -ENOMEM;
7431@@ -109,6 +110,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7432 goto found_addr;
7433 }
7434
7435+#ifdef CONFIG_PAX_RANDMMAP
7436+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7437+#endif
7438+
7439 if (addr) {
7440 if (do_color_align && last_mmap)
7441 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7442@@ -127,6 +132,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7443 info.high_limit = mmap_upper_limit();
7444 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7445 info.align_offset = shared_align_offset(last_mmap, pgoff);
7446+ info.threadstack_offset = offset;
7447 addr = vm_unmapped_area(&info);
7448
7449 found_addr:
7450@@ -146,6 +152,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7451 unsigned long addr = addr0;
7452 int do_color_align, last_mmap;
7453 struct vm_unmapped_area_info info;
7454+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7455
7456 #ifdef CONFIG_64BIT
7457 /* This should only ever run for 32-bit processes. */
7458@@ -170,6 +177,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7459 }
7460
7461 /* requesting a specific address */
7462+#ifdef CONFIG_PAX_RANDMMAP
7463+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7464+#endif
7465+
7466 if (addr) {
7467 if (do_color_align && last_mmap)
7468 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7469@@ -187,6 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7470 info.high_limit = mm->mmap_base;
7471 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7472 info.align_offset = shared_align_offset(last_mmap, pgoff);
7473+ info.threadstack_offset = offset;
7474 addr = vm_unmapped_area(&info);
7475 if (!(addr & ~PAGE_MASK))
7476 goto found_addr;
7477@@ -252,6 +264,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7478 mm->mmap_legacy_base = mmap_legacy_base();
7479 mm->mmap_base = mmap_upper_limit();
7480
7481+#ifdef CONFIG_PAX_RANDMMAP
7482+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
7483+ mm->mmap_legacy_base += mm->delta_mmap;
7484+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7485+ }
7486+#endif
7487+
7488 if (mmap_is_legacy()) {
7489 mm->mmap_base = mm->mmap_legacy_base;
7490 mm->get_unmapped_area = arch_get_unmapped_area;
7491diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7492index 47ee620..1107387 100644
7493--- a/arch/parisc/kernel/traps.c
7494+++ b/arch/parisc/kernel/traps.c
7495@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7496
7497 down_read(&current->mm->mmap_sem);
7498 vma = find_vma(current->mm,regs->iaoq[0]);
7499- if (vma && (regs->iaoq[0] >= vma->vm_start)
7500- && (vma->vm_flags & VM_EXEC)) {
7501-
7502+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7503 fault_address = regs->iaoq[0];
7504 fault_space = regs->iasq[0];
7505
7506diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7507index e5120e6..8ddb5cc 100644
7508--- a/arch/parisc/mm/fault.c
7509+++ b/arch/parisc/mm/fault.c
7510@@ -15,6 +15,7 @@
7511 #include <linux/sched.h>
7512 #include <linux/interrupt.h>
7513 #include <linux/module.h>
7514+#include <linux/unistd.h>
7515
7516 #include <asm/uaccess.h>
7517 #include <asm/traps.h>
7518@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
7519 static unsigned long
7520 parisc_acctyp(unsigned long code, unsigned int inst)
7521 {
7522- if (code == 6 || code == 16)
7523+ if (code == 6 || code == 7 || code == 16)
7524 return VM_EXEC;
7525
7526 switch (inst & 0xf0000000) {
7527@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7528 }
7529 #endif
7530
7531+#ifdef CONFIG_PAX_PAGEEXEC
7532+/*
7533+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7534+ *
7535+ * returns 1 when task should be killed
7536+ * 2 when rt_sigreturn trampoline was detected
7537+ * 3 when unpatched PLT trampoline was detected
7538+ */
7539+static int pax_handle_fetch_fault(struct pt_regs *regs)
7540+{
7541+
7542+#ifdef CONFIG_PAX_EMUPLT
7543+ int err;
7544+
7545+ do { /* PaX: unpatched PLT emulation */
7546+ unsigned int bl, depwi;
7547+
7548+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7549+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7550+
7551+ if (err)
7552+ break;
7553+
7554+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7555+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7556+
7557+ err = get_user(ldw, (unsigned int *)addr);
7558+ err |= get_user(bv, (unsigned int *)(addr+4));
7559+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7560+
7561+ if (err)
7562+ break;
7563+
7564+ if (ldw == 0x0E801096U &&
7565+ bv == 0xEAC0C000U &&
7566+ ldw2 == 0x0E881095U)
7567+ {
7568+ unsigned int resolver, map;
7569+
7570+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7571+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7572+ if (err)
7573+ break;
7574+
7575+ regs->gr[20] = instruction_pointer(regs)+8;
7576+ regs->gr[21] = map;
7577+ regs->gr[22] = resolver;
7578+ regs->iaoq[0] = resolver | 3UL;
7579+ regs->iaoq[1] = regs->iaoq[0] + 4;
7580+ return 3;
7581+ }
7582+ }
7583+ } while (0);
7584+#endif
7585+
7586+#ifdef CONFIG_PAX_EMUTRAMP
7587+
7588+#ifndef CONFIG_PAX_EMUSIGRT
7589+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7590+ return 1;
7591+#endif
7592+
7593+ do { /* PaX: rt_sigreturn emulation */
7594+ unsigned int ldi1, ldi2, bel, nop;
7595+
7596+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7597+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7598+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7599+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7600+
7601+ if (err)
7602+ break;
7603+
7604+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7605+ ldi2 == 0x3414015AU &&
7606+ bel == 0xE4008200U &&
7607+ nop == 0x08000240U)
7608+ {
7609+ regs->gr[25] = (ldi1 & 2) >> 1;
7610+ regs->gr[20] = __NR_rt_sigreturn;
7611+ regs->gr[31] = regs->iaoq[1] + 16;
7612+ regs->sr[0] = regs->iasq[1];
7613+ regs->iaoq[0] = 0x100UL;
7614+ regs->iaoq[1] = regs->iaoq[0] + 4;
7615+ regs->iasq[0] = regs->sr[2];
7616+ regs->iasq[1] = regs->sr[2];
7617+ return 2;
7618+ }
7619+ } while (0);
7620+#endif
7621+
7622+ return 1;
7623+}
7624+
7625+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7626+{
7627+ unsigned long i;
7628+
7629+ printk(KERN_ERR "PAX: bytes at PC: ");
7630+ for (i = 0; i < 5; i++) {
7631+ unsigned int c;
7632+ if (get_user(c, (unsigned int *)pc+i))
7633+ printk(KERN_CONT "???????? ");
7634+ else
7635+ printk(KERN_CONT "%08x ", c);
7636+ }
7637+ printk("\n");
7638+}
7639+#endif
7640+
7641 int fixup_exception(struct pt_regs *regs)
7642 {
7643 const struct exception_table_entry *fix;
7644@@ -234,8 +345,33 @@ retry:
7645
7646 good_area:
7647
7648- if ((vma->vm_flags & acc_type) != acc_type)
7649+ if ((vma->vm_flags & acc_type) != acc_type) {
7650+
7651+#ifdef CONFIG_PAX_PAGEEXEC
7652+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7653+ (address & ~3UL) == instruction_pointer(regs))
7654+ {
7655+ up_read(&mm->mmap_sem);
7656+ switch (pax_handle_fetch_fault(regs)) {
7657+
7658+#ifdef CONFIG_PAX_EMUPLT
7659+ case 3:
7660+ return;
7661+#endif
7662+
7663+#ifdef CONFIG_PAX_EMUTRAMP
7664+ case 2:
7665+ return;
7666+#endif
7667+
7668+ }
7669+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7670+ do_group_exit(SIGKILL);
7671+ }
7672+#endif
7673+
7674 goto bad_area;
7675+ }
7676
7677 /*
7678 * If for any reason at all we couldn't handle the fault, make
7679diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7680index 22b0940..309f790 100644
7681--- a/arch/powerpc/Kconfig
7682+++ b/arch/powerpc/Kconfig
7683@@ -409,6 +409,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
7684 config KEXEC
7685 bool "kexec system call"
7686 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7687+ depends on !GRKERNSEC_KMEM
7688 help
7689 kexec is a system call that implements the ability to shutdown your
7690 current kernel, and to start another kernel. It is like a reboot
7691diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7692index 512d278..d31fadd 100644
7693--- a/arch/powerpc/include/asm/atomic.h
7694+++ b/arch/powerpc/include/asm/atomic.h
7695@@ -12,6 +12,11 @@
7696
7697 #define ATOMIC_INIT(i) { (i) }
7698
7699+#define _ASM_EXTABLE(from, to) \
7700+" .section __ex_table,\"a\"\n" \
7701+ PPC_LONG" " #from ", " #to"\n" \
7702+" .previous\n"
7703+
7704 static __inline__ int atomic_read(const atomic_t *v)
7705 {
7706 int t;
7707@@ -21,39 +26,80 @@ static __inline__ int atomic_read(const atomic_t *v)
7708 return t;
7709 }
7710
7711+static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
7712+{
7713+ int t;
7714+
7715+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7716+
7717+ return t;
7718+}
7719+
7720 static __inline__ void atomic_set(atomic_t *v, int i)
7721 {
7722 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7723 }
7724
7725-#define ATOMIC_OP(op, asm_op) \
7726-static __inline__ void atomic_##op(int a, atomic_t *v) \
7727+static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7728+{
7729+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7730+}
7731+
7732+#ifdef CONFIG_PAX_REFCOUNT
7733+#define __REFCOUNT_OP(op) op##o.
7734+#define __OVERFLOW_PRE \
7735+ " mcrxr cr0\n"
7736+#define __OVERFLOW_POST \
7737+ " bf 4*cr0+so, 3f\n" \
7738+ "2: .long 0x00c00b00\n" \
7739+ "3:\n"
7740+#define __OVERFLOW_EXTABLE \
7741+ "\n4:\n"
7742+ _ASM_EXTABLE(2b, 4b)
7743+#else
7744+#define __REFCOUNT_OP(op) op
7745+#define __OVERFLOW_PRE
7746+#define __OVERFLOW_POST
7747+#define __OVERFLOW_EXTABLE
7748+#endif
7749+
7750+#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7751+static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v) \
7752 { \
7753 int t; \
7754 \
7755 __asm__ __volatile__( \
7756-"1: lwarx %0,0,%3 # atomic_" #op "\n" \
7757+"1: lwarx %0,0,%3 # atomic_" #op #suffix "\n" \
7758+ pre_op \
7759 #asm_op " %0,%2,%0\n" \
7760+ post_op \
7761 PPC405_ERR77(0,%3) \
7762 " stwcx. %0,0,%3 \n" \
7763 " bne- 1b\n" \
7764+ extable \
7765 : "=&r" (t), "+m" (v->counter) \
7766 : "r" (a), "r" (&v->counter) \
7767 : "cc"); \
7768 } \
7769
7770-#define ATOMIC_OP_RETURN(op, asm_op) \
7771-static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7772+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , ) \
7773+ __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7774+
7775+#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
7776+static inline int atomic_##op##_return##suffix(int a, atomic##suffix##_t *v)\
7777 { \
7778 int t; \
7779 \
7780 __asm__ __volatile__( \
7781 PPC_ATOMIC_ENTRY_BARRIER \
7782-"1: lwarx %0,0,%2 # atomic_" #op "_return\n" \
7783+"1: lwarx %0,0,%2 # atomic_" #op "_return" #suffix "\n" \
7784+ pre_op \
7785 #asm_op " %0,%1,%0\n" \
7786+ post_op \
7787 PPC405_ERR77(0,%2) \
7788 " stwcx. %0,0,%2 \n" \
7789 " bne- 1b\n" \
7790+ extable \
7791 PPC_ATOMIC_EXIT_BARRIER \
7792 : "=&r" (t) \
7793 : "r" (a), "r" (&v->counter) \
7794@@ -62,6 +108,9 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7795 return t; \
7796 }
7797
7798+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\
7799+ __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7800+
7801 #define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
7802
7803 ATOMIC_OPS(add, add)
7804@@ -69,42 +118,29 @@ ATOMIC_OPS(sub, subf)
7805
7806 #undef ATOMIC_OPS
7807 #undef ATOMIC_OP_RETURN
7808+#undef __ATOMIC_OP_RETURN
7809 #undef ATOMIC_OP
7810+#undef __ATOMIC_OP
7811
7812 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
7813
7814-static __inline__ void atomic_inc(atomic_t *v)
7815-{
7816- int t;
7817+/*
7818+ * atomic_inc - increment atomic variable
7819+ * @v: pointer of type atomic_t
7820+ *
7821+ * Automatically increments @v by 1
7822+ */
7823+#define atomic_inc(v) atomic_add(1, (v))
7824+#define atomic_inc_return(v) atomic_add_return(1, (v))
7825
7826- __asm__ __volatile__(
7827-"1: lwarx %0,0,%2 # atomic_inc\n\
7828- addic %0,%0,1\n"
7829- PPC405_ERR77(0,%2)
7830-" stwcx. %0,0,%2 \n\
7831- bne- 1b"
7832- : "=&r" (t), "+m" (v->counter)
7833- : "r" (&v->counter)
7834- : "cc", "xer");
7835+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7836+{
7837+ atomic_add_unchecked(1, v);
7838 }
7839
7840-static __inline__ int atomic_inc_return(atomic_t *v)
7841+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7842 {
7843- int t;
7844-
7845- __asm__ __volatile__(
7846- PPC_ATOMIC_ENTRY_BARRIER
7847-"1: lwarx %0,0,%1 # atomic_inc_return\n\
7848- addic %0,%0,1\n"
7849- PPC405_ERR77(0,%1)
7850-" stwcx. %0,0,%1 \n\
7851- bne- 1b"
7852- PPC_ATOMIC_EXIT_BARRIER
7853- : "=&r" (t)
7854- : "r" (&v->counter)
7855- : "cc", "xer", "memory");
7856-
7857- return t;
7858+ return atomic_add_return_unchecked(1, v);
7859 }
7860
7861 /*
7862@@ -117,43 +153,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
7863 */
7864 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7865
7866-static __inline__ void atomic_dec(atomic_t *v)
7867+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7868 {
7869- int t;
7870-
7871- __asm__ __volatile__(
7872-"1: lwarx %0,0,%2 # atomic_dec\n\
7873- addic %0,%0,-1\n"
7874- PPC405_ERR77(0,%2)\
7875-" stwcx. %0,0,%2\n\
7876- bne- 1b"
7877- : "=&r" (t), "+m" (v->counter)
7878- : "r" (&v->counter)
7879- : "cc", "xer");
7880+ return atomic_add_return_unchecked(1, v) == 0;
7881 }
7882
7883-static __inline__ int atomic_dec_return(atomic_t *v)
7884+/*
7885+ * atomic_dec - decrement atomic variable
7886+ * @v: pointer of type atomic_t
7887+ *
7888+ * Atomically decrements @v by 1
7889+ */
7890+#define atomic_dec(v) atomic_sub(1, (v))
7891+#define atomic_dec_return(v) atomic_sub_return(1, (v))
7892+
7893+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
7894 {
7895- int t;
7896-
7897- __asm__ __volatile__(
7898- PPC_ATOMIC_ENTRY_BARRIER
7899-"1: lwarx %0,0,%1 # atomic_dec_return\n\
7900- addic %0,%0,-1\n"
7901- PPC405_ERR77(0,%1)
7902-" stwcx. %0,0,%1\n\
7903- bne- 1b"
7904- PPC_ATOMIC_EXIT_BARRIER
7905- : "=&r" (t)
7906- : "r" (&v->counter)
7907- : "cc", "xer", "memory");
7908-
7909- return t;
7910+ atomic_sub_unchecked(1, v);
7911 }
7912
7913 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7914 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7915
7916+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7917+{
7918+ return cmpxchg(&(v->counter), old, new);
7919+}
7920+
7921+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7922+{
7923+ return xchg(&(v->counter), new);
7924+}
7925+
7926 /**
7927 * __atomic_add_unless - add unless the number is a given value
7928 * @v: pointer of type atomic_t
7929@@ -171,11 +202,27 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
7930 PPC_ATOMIC_ENTRY_BARRIER
7931 "1: lwarx %0,0,%1 # __atomic_add_unless\n\
7932 cmpw 0,%0,%3 \n\
7933- beq- 2f \n\
7934- add %0,%2,%0 \n"
7935+ beq- 2f \n"
7936+
7937+#ifdef CONFIG_PAX_REFCOUNT
7938+" mcrxr cr0\n"
7939+" addo. %0,%2,%0\n"
7940+" bf 4*cr0+so, 4f\n"
7941+"3:.long " "0x00c00b00""\n"
7942+"4:\n"
7943+#else
7944+ "add %0,%2,%0 \n"
7945+#endif
7946+
7947 PPC405_ERR77(0,%2)
7948 " stwcx. %0,0,%1 \n\
7949 bne- 1b \n"
7950+"5:"
7951+
7952+#ifdef CONFIG_PAX_REFCOUNT
7953+ _ASM_EXTABLE(3b, 5b)
7954+#endif
7955+
7956 PPC_ATOMIC_EXIT_BARRIER
7957 " subf %0,%2,%0 \n\
7958 2:"
7959@@ -248,6 +295,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
7960 }
7961 #define atomic_dec_if_positive atomic_dec_if_positive
7962
7963+#define smp_mb__before_atomic_dec() smp_mb()
7964+#define smp_mb__after_atomic_dec() smp_mb()
7965+#define smp_mb__before_atomic_inc() smp_mb()
7966+#define smp_mb__after_atomic_inc() smp_mb()
7967+
7968 #ifdef __powerpc64__
7969
7970 #define ATOMIC64_INIT(i) { (i) }
7971@@ -261,37 +313,60 @@ static __inline__ long atomic64_read(const atomic64_t *v)
7972 return t;
7973 }
7974
7975+static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7976+{
7977+ long t;
7978+
7979+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7980+
7981+ return t;
7982+}
7983+
7984 static __inline__ void atomic64_set(atomic64_t *v, long i)
7985 {
7986 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7987 }
7988
7989-#define ATOMIC64_OP(op, asm_op) \
7990-static __inline__ void atomic64_##op(long a, atomic64_t *v) \
7991+static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7992+{
7993+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7994+}
7995+
7996+#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7997+static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\
7998 { \
7999 long t; \
8000 \
8001 __asm__ __volatile__( \
8002 "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
8003+ pre_op \
8004 #asm_op " %0,%2,%0\n" \
8005+ post_op \
8006 " stdcx. %0,0,%3 \n" \
8007 " bne- 1b\n" \
8008+ extable \
8009 : "=&r" (t), "+m" (v->counter) \
8010 : "r" (a), "r" (&v->counter) \
8011 : "cc"); \
8012 }
8013
8014-#define ATOMIC64_OP_RETURN(op, asm_op) \
8015-static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8016+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , ) \
8017+ __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8018+
8019+#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
8020+static inline long atomic64_##op##_return##suffix(long a, atomic64##suffix##_t *v)\
8021 { \
8022 long t; \
8023 \
8024 __asm__ __volatile__( \
8025 PPC_ATOMIC_ENTRY_BARRIER \
8026 "1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \
8027+ pre_op \
8028 #asm_op " %0,%1,%0\n" \
8029+ post_op \
8030 " stdcx. %0,0,%2 \n" \
8031 " bne- 1b\n" \
8032+ extable \
8033 PPC_ATOMIC_EXIT_BARRIER \
8034 : "=&r" (t) \
8035 : "r" (a), "r" (&v->counter) \
8036@@ -300,6 +375,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8037 return t; \
8038 }
8039
8040+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\
8041+ __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8042+
8043 #define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
8044
8045 ATOMIC64_OPS(add, add)
8046@@ -307,40 +385,33 @@ ATOMIC64_OPS(sub, subf)
8047
8048 #undef ATOMIC64_OPS
8049 #undef ATOMIC64_OP_RETURN
8050+#undef __ATOMIC64_OP_RETURN
8051 #undef ATOMIC64_OP
8052+#undef __ATOMIC64_OP
8053+#undef __OVERFLOW_EXTABLE
8054+#undef __OVERFLOW_POST
8055+#undef __OVERFLOW_PRE
8056+#undef __REFCOUNT_OP
8057
8058 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
8059
8060-static __inline__ void atomic64_inc(atomic64_t *v)
8061-{
8062- long t;
8063+/*
8064+ * atomic64_inc - increment atomic variable
8065+ * @v: pointer of type atomic64_t
8066+ *
8067+ * Automatically increments @v by 1
8068+ */
8069+#define atomic64_inc(v) atomic64_add(1, (v))
8070+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
8071
8072- __asm__ __volatile__(
8073-"1: ldarx %0,0,%2 # atomic64_inc\n\
8074- addic %0,%0,1\n\
8075- stdcx. %0,0,%2 \n\
8076- bne- 1b"
8077- : "=&r" (t), "+m" (v->counter)
8078- : "r" (&v->counter)
8079- : "cc", "xer");
8080+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8081+{
8082+ atomic64_add_unchecked(1, v);
8083 }
8084
8085-static __inline__ long atomic64_inc_return(atomic64_t *v)
8086+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8087 {
8088- long t;
8089-
8090- __asm__ __volatile__(
8091- PPC_ATOMIC_ENTRY_BARRIER
8092-"1: ldarx %0,0,%1 # atomic64_inc_return\n\
8093- addic %0,%0,1\n\
8094- stdcx. %0,0,%1 \n\
8095- bne- 1b"
8096- PPC_ATOMIC_EXIT_BARRIER
8097- : "=&r" (t)
8098- : "r" (&v->counter)
8099- : "cc", "xer", "memory");
8100-
8101- return t;
8102+ return atomic64_add_return_unchecked(1, v);
8103 }
8104
8105 /*
8106@@ -353,36 +424,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
8107 */
8108 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8109
8110-static __inline__ void atomic64_dec(atomic64_t *v)
8111+/*
8112+ * atomic64_dec - decrement atomic variable
8113+ * @v: pointer of type atomic64_t
8114+ *
8115+ * Atomically decrements @v by 1
8116+ */
8117+#define atomic64_dec(v) atomic64_sub(1, (v))
8118+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
8119+
8120+static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8121 {
8122- long t;
8123-
8124- __asm__ __volatile__(
8125-"1: ldarx %0,0,%2 # atomic64_dec\n\
8126- addic %0,%0,-1\n\
8127- stdcx. %0,0,%2\n\
8128- bne- 1b"
8129- : "=&r" (t), "+m" (v->counter)
8130- : "r" (&v->counter)
8131- : "cc", "xer");
8132-}
8133-
8134-static __inline__ long atomic64_dec_return(atomic64_t *v)
8135-{
8136- long t;
8137-
8138- __asm__ __volatile__(
8139- PPC_ATOMIC_ENTRY_BARRIER
8140-"1: ldarx %0,0,%1 # atomic64_dec_return\n\
8141- addic %0,%0,-1\n\
8142- stdcx. %0,0,%1\n\
8143- bne- 1b"
8144- PPC_ATOMIC_EXIT_BARRIER
8145- : "=&r" (t)
8146- : "r" (&v->counter)
8147- : "cc", "xer", "memory");
8148-
8149- return t;
8150+ atomic64_sub_unchecked(1, v);
8151 }
8152
8153 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
8154@@ -415,6 +468,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
8155 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8156 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8157
8158+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8159+{
8160+ return cmpxchg(&(v->counter), old, new);
8161+}
8162+
8163+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8164+{
8165+ return xchg(&(v->counter), new);
8166+}
8167+
8168 /**
8169 * atomic64_add_unless - add unless the number is a given value
8170 * @v: pointer of type atomic64_t
8171@@ -430,13 +493,29 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
8172
8173 __asm__ __volatile__ (
8174 PPC_ATOMIC_ENTRY_BARRIER
8175-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
8176+"1: ldarx %0,0,%1 # atomic64_add_unless\n\
8177 cmpd 0,%0,%3 \n\
8178- beq- 2f \n\
8179- add %0,%2,%0 \n"
8180+ beq- 2f \n"
8181+
8182+#ifdef CONFIG_PAX_REFCOUNT
8183+" mcrxr cr0\n"
8184+" addo. %0,%2,%0\n"
8185+" bf 4*cr0+so, 4f\n"
8186+"3:.long " "0x00c00b00""\n"
8187+"4:\n"
8188+#else
8189+ "add %0,%2,%0 \n"
8190+#endif
8191+
8192 " stdcx. %0,0,%1 \n\
8193 bne- 1b \n"
8194 PPC_ATOMIC_EXIT_BARRIER
8195+"5:"
8196+
8197+#ifdef CONFIG_PAX_REFCOUNT
8198+ _ASM_EXTABLE(3b, 5b)
8199+#endif
8200+
8201 " subf %0,%2,%0 \n\
8202 2:"
8203 : "=&r" (t)
8204diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8205index a3bf5be..e03ba81 100644
8206--- a/arch/powerpc/include/asm/barrier.h
8207+++ b/arch/powerpc/include/asm/barrier.h
8208@@ -76,7 +76,7 @@
8209 do { \
8210 compiletime_assert_atomic_type(*p); \
8211 smp_lwsync(); \
8212- ACCESS_ONCE(*p) = (v); \
8213+ ACCESS_ONCE_RW(*p) = (v); \
8214 } while (0)
8215
8216 #define smp_load_acquire(p) \
8217diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8218index 34a05a1..a1f2c67 100644
8219--- a/arch/powerpc/include/asm/cache.h
8220+++ b/arch/powerpc/include/asm/cache.h
8221@@ -4,6 +4,7 @@
8222 #ifdef __KERNEL__
8223
8224 #include <asm/reg.h>
8225+#include <linux/const.h>
8226
8227 /* bytes per L1 cache line */
8228 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8229@@ -23,7 +24,7 @@
8230 #define L1_CACHE_SHIFT 7
8231 #endif
8232
8233-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8234+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8235
8236 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8237
8238diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8239index 57d289a..b36c98c 100644
8240--- a/arch/powerpc/include/asm/elf.h
8241+++ b/arch/powerpc/include/asm/elf.h
8242@@ -30,6 +30,18 @@
8243
8244 #define ELF_ET_DYN_BASE 0x20000000
8245
8246+#ifdef CONFIG_PAX_ASLR
8247+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8248+
8249+#ifdef __powerpc64__
8250+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8251+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8252+#else
8253+#define PAX_DELTA_MMAP_LEN 15
8254+#define PAX_DELTA_STACK_LEN 15
8255+#endif
8256+#endif
8257+
8258 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8259
8260 /*
8261@@ -128,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8262 (0x7ff >> (PAGE_SHIFT - 12)) : \
8263 (0x3ffff >> (PAGE_SHIFT - 12)))
8264
8265-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8266-#define arch_randomize_brk arch_randomize_brk
8267-
8268-
8269 #ifdef CONFIG_SPU_BASE
8270 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8271 #define NT_SPU 1
8272diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8273index 8196e9c..d83a9f3 100644
8274--- a/arch/powerpc/include/asm/exec.h
8275+++ b/arch/powerpc/include/asm/exec.h
8276@@ -4,6 +4,6 @@
8277 #ifndef _ASM_POWERPC_EXEC_H
8278 #define _ASM_POWERPC_EXEC_H
8279
8280-extern unsigned long arch_align_stack(unsigned long sp);
8281+#define arch_align_stack(x) ((x) & ~0xfUL)
8282
8283 #endif /* _ASM_POWERPC_EXEC_H */
8284diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8285index 5acabbd..7ea14fa 100644
8286--- a/arch/powerpc/include/asm/kmap_types.h
8287+++ b/arch/powerpc/include/asm/kmap_types.h
8288@@ -10,7 +10,7 @@
8289 * 2 of the License, or (at your option) any later version.
8290 */
8291
8292-#define KM_TYPE_NR 16
8293+#define KM_TYPE_NR 17
8294
8295 #endif /* __KERNEL__ */
8296 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8297diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8298index b8da913..c02b593 100644
8299--- a/arch/powerpc/include/asm/local.h
8300+++ b/arch/powerpc/include/asm/local.h
8301@@ -9,21 +9,65 @@ typedef struct
8302 atomic_long_t a;
8303 } local_t;
8304
8305+typedef struct
8306+{
8307+ atomic_long_unchecked_t a;
8308+} local_unchecked_t;
8309+
8310 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8311
8312 #define local_read(l) atomic_long_read(&(l)->a)
8313+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8314 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8315+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8316
8317 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8318+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8319 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8320+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8321 #define local_inc(l) atomic_long_inc(&(l)->a)
8322+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8323 #define local_dec(l) atomic_long_dec(&(l)->a)
8324+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8325
8326 static __inline__ long local_add_return(long a, local_t *l)
8327 {
8328 long t;
8329
8330 __asm__ __volatile__(
8331+"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
8332+
8333+#ifdef CONFIG_PAX_REFCOUNT
8334+" mcrxr cr0\n"
8335+" addo. %0,%1,%0\n"
8336+" bf 4*cr0+so, 3f\n"
8337+"2:.long " "0x00c00b00""\n"
8338+#else
8339+" add %0,%1,%0\n"
8340+#endif
8341+
8342+"3:\n"
8343+ PPC405_ERR77(0,%2)
8344+ PPC_STLCX "%0,0,%2 \n\
8345+ bne- 1b"
8346+
8347+#ifdef CONFIG_PAX_REFCOUNT
8348+"\n4:\n"
8349+ _ASM_EXTABLE(2b, 4b)
8350+#endif
8351+
8352+ : "=&r" (t)
8353+ : "r" (a), "r" (&(l->a.counter))
8354+ : "cc", "memory");
8355+
8356+ return t;
8357+}
8358+
8359+static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
8360+{
8361+ long t;
8362+
8363+ __asm__ __volatile__(
8364 "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
8365 add %0,%1,%0\n"
8366 PPC405_ERR77(0,%2)
8367@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
8368
8369 #define local_cmpxchg(l, o, n) \
8370 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8371+#define local_cmpxchg_unchecked(l, o, n) \
8372+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8373 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8374
8375 /**
8376diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8377index 8565c25..2865190 100644
8378--- a/arch/powerpc/include/asm/mman.h
8379+++ b/arch/powerpc/include/asm/mman.h
8380@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8381 }
8382 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8383
8384-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8385+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8386 {
8387 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8388 }
8389diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8390index 69c0598..2c56964 100644
8391--- a/arch/powerpc/include/asm/page.h
8392+++ b/arch/powerpc/include/asm/page.h
8393@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
8394 * and needs to be executable. This means the whole heap ends
8395 * up being executable.
8396 */
8397-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8398- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8399+#define VM_DATA_DEFAULT_FLAGS32 \
8400+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8401+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8402
8403 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8404 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8405@@ -256,6 +257,9 @@ extern long long virt_phys_offset;
8406 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8407 #endif
8408
8409+#define ktla_ktva(addr) (addr)
8410+#define ktva_ktla(addr) (addr)
8411+
8412 #ifndef CONFIG_PPC_BOOK3S_64
8413 /*
8414 * Use the top bit of the higher-level page table entries to indicate whether
8415diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8416index d908a46..3753f71 100644
8417--- a/arch/powerpc/include/asm/page_64.h
8418+++ b/arch/powerpc/include/asm/page_64.h
8419@@ -172,15 +172,18 @@ do { \
8420 * stack by default, so in the absence of a PT_GNU_STACK program header
8421 * we turn execute permission off.
8422 */
8423-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8424- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8425+#define VM_STACK_DEFAULT_FLAGS32 \
8426+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8427+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8428
8429 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8430 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8431
8432+#ifndef CONFIG_PAX_PAGEEXEC
8433 #define VM_STACK_DEFAULT_FLAGS \
8434 (is_32bit_task() ? \
8435 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8436+#endif
8437
8438 #include <asm-generic/getorder.h>
8439
8440diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8441index 4b0be20..c15a27d 100644
8442--- a/arch/powerpc/include/asm/pgalloc-64.h
8443+++ b/arch/powerpc/include/asm/pgalloc-64.h
8444@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8445 #ifndef CONFIG_PPC_64K_PAGES
8446
8447 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8448+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8449
8450 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8451 {
8452@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8453 pud_set(pud, (unsigned long)pmd);
8454 }
8455
8456+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8457+{
8458+ pud_populate(mm, pud, pmd);
8459+}
8460+
8461 #define pmd_populate(mm, pmd, pte_page) \
8462 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8463 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8464@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8465 #endif
8466
8467 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8468+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8469
8470 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8471 pte_t *pte)
8472diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8473index 9835ac4..900430f 100644
8474--- a/arch/powerpc/include/asm/pgtable.h
8475+++ b/arch/powerpc/include/asm/pgtable.h
8476@@ -2,6 +2,7 @@
8477 #define _ASM_POWERPC_PGTABLE_H
8478 #ifdef __KERNEL__
8479
8480+#include <linux/const.h>
8481 #ifndef __ASSEMBLY__
8482 #include <linux/mmdebug.h>
8483 #include <linux/mmzone.h>
8484diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8485index 62cfb0c..50c6402 100644
8486--- a/arch/powerpc/include/asm/pte-hash32.h
8487+++ b/arch/powerpc/include/asm/pte-hash32.h
8488@@ -20,6 +20,7 @@
8489 #define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
8490 #define _PAGE_USER 0x004 /* usermode access allowed */
8491 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8492+#define _PAGE_EXEC _PAGE_GUARDED
8493 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8494 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8495 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
8496diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8497index af56b5c..f86f3f6 100644
8498--- a/arch/powerpc/include/asm/reg.h
8499+++ b/arch/powerpc/include/asm/reg.h
8500@@ -253,6 +253,7 @@
8501 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8502 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8503 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8504+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8505 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8506 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8507 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8508diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8509index d607df5..08dc9ae 100644
8510--- a/arch/powerpc/include/asm/smp.h
8511+++ b/arch/powerpc/include/asm/smp.h
8512@@ -51,7 +51,7 @@ struct smp_ops_t {
8513 int (*cpu_disable)(void);
8514 void (*cpu_die)(unsigned int nr);
8515 int (*cpu_bootable)(unsigned int nr);
8516-};
8517+} __no_const;
8518
8519 extern void smp_send_debugger_break(void);
8520 extern void start_secondary_resume(void);
8521diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
8522index 4dbe072..b803275 100644
8523--- a/arch/powerpc/include/asm/spinlock.h
8524+++ b/arch/powerpc/include/asm/spinlock.h
8525@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
8526 __asm__ __volatile__(
8527 "1: " PPC_LWARX(%0,0,%1,1) "\n"
8528 __DO_SIGN_EXTEND
8529-" addic. %0,%0,1\n\
8530- ble- 2f\n"
8531+
8532+#ifdef CONFIG_PAX_REFCOUNT
8533+" mcrxr cr0\n"
8534+" addico. %0,%0,1\n"
8535+" bf 4*cr0+so, 3f\n"
8536+"2:.long " "0x00c00b00""\n"
8537+#else
8538+" addic. %0,%0,1\n"
8539+#endif
8540+
8541+"3:\n"
8542+ "ble- 4f\n"
8543 PPC405_ERR77(0,%1)
8544 " stwcx. %0,0,%1\n\
8545 bne- 1b\n"
8546 PPC_ACQUIRE_BARRIER
8547-"2:" : "=&r" (tmp)
8548+"4:"
8549+
8550+#ifdef CONFIG_PAX_REFCOUNT
8551+ _ASM_EXTABLE(2b,4b)
8552+#endif
8553+
8554+ : "=&r" (tmp)
8555 : "r" (&rw->lock)
8556 : "cr0", "xer", "memory");
8557
8558@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
8559 __asm__ __volatile__(
8560 "# read_unlock\n\t"
8561 PPC_RELEASE_BARRIER
8562-"1: lwarx %0,0,%1\n\
8563- addic %0,%0,-1\n"
8564+"1: lwarx %0,0,%1\n"
8565+
8566+#ifdef CONFIG_PAX_REFCOUNT
8567+" mcrxr cr0\n"
8568+" addico. %0,%0,-1\n"
8569+" bf 4*cr0+so, 3f\n"
8570+"2:.long " "0x00c00b00""\n"
8571+#else
8572+" addic. %0,%0,-1\n"
8573+#endif
8574+
8575+"3:\n"
8576 PPC405_ERR77(0,%1)
8577 " stwcx. %0,0,%1\n\
8578 bne- 1b"
8579+
8580+#ifdef CONFIG_PAX_REFCOUNT
8581+"\n4:\n"
8582+ _ASM_EXTABLE(2b, 4b)
8583+#endif
8584+
8585 : "=&r"(tmp)
8586 : "r"(&rw->lock)
8587 : "cr0", "xer", "memory");
8588diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8589index 7248979..80b75de 100644
8590--- a/arch/powerpc/include/asm/thread_info.h
8591+++ b/arch/powerpc/include/asm/thread_info.h
8592@@ -103,6 +103,8 @@ static inline struct thread_info *current_thread_info(void)
8593 #if defined(CONFIG_PPC64)
8594 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8595 #endif
8596+/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
8597+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
8598
8599 /* as above, but as bit values */
8600 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8601@@ -121,9 +123,10 @@ static inline struct thread_info *current_thread_info(void)
8602 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8603 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8604 #define _TIF_NOHZ (1<<TIF_NOHZ)
8605+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8606 #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8607 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8608- _TIF_NOHZ)
8609+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
8610
8611 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8612 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
8613diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8614index a0c071d..49cdc7f 100644
8615--- a/arch/powerpc/include/asm/uaccess.h
8616+++ b/arch/powerpc/include/asm/uaccess.h
8617@@ -58,6 +58,7 @@
8618
8619 #endif
8620
8621+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8622 #define access_ok(type, addr, size) \
8623 (__chk_user_ptr(addr), \
8624 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8625@@ -318,52 +319,6 @@ do { \
8626 extern unsigned long __copy_tofrom_user(void __user *to,
8627 const void __user *from, unsigned long size);
8628
8629-#ifndef __powerpc64__
8630-
8631-static inline unsigned long copy_from_user(void *to,
8632- const void __user *from, unsigned long n)
8633-{
8634- unsigned long over;
8635-
8636- if (access_ok(VERIFY_READ, from, n))
8637- return __copy_tofrom_user((__force void __user *)to, from, n);
8638- if ((unsigned long)from < TASK_SIZE) {
8639- over = (unsigned long)from + n - TASK_SIZE;
8640- return __copy_tofrom_user((__force void __user *)to, from,
8641- n - over) + over;
8642- }
8643- return n;
8644-}
8645-
8646-static inline unsigned long copy_to_user(void __user *to,
8647- const void *from, unsigned long n)
8648-{
8649- unsigned long over;
8650-
8651- if (access_ok(VERIFY_WRITE, to, n))
8652- return __copy_tofrom_user(to, (__force void __user *)from, n);
8653- if ((unsigned long)to < TASK_SIZE) {
8654- over = (unsigned long)to + n - TASK_SIZE;
8655- return __copy_tofrom_user(to, (__force void __user *)from,
8656- n - over) + over;
8657- }
8658- return n;
8659-}
8660-
8661-#else /* __powerpc64__ */
8662-
8663-#define __copy_in_user(to, from, size) \
8664- __copy_tofrom_user((to), (from), (size))
8665-
8666-extern unsigned long copy_from_user(void *to, const void __user *from,
8667- unsigned long n);
8668-extern unsigned long copy_to_user(void __user *to, const void *from,
8669- unsigned long n);
8670-extern unsigned long copy_in_user(void __user *to, const void __user *from,
8671- unsigned long n);
8672-
8673-#endif /* __powerpc64__ */
8674-
8675 static inline unsigned long __copy_from_user_inatomic(void *to,
8676 const void __user *from, unsigned long n)
8677 {
8678@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8679 if (ret == 0)
8680 return 0;
8681 }
8682+
8683+ if (!__builtin_constant_p(n))
8684+ check_object_size(to, n, false);
8685+
8686 return __copy_tofrom_user((__force void __user *)to, from, n);
8687 }
8688
8689@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8690 if (ret == 0)
8691 return 0;
8692 }
8693+
8694+ if (!__builtin_constant_p(n))
8695+ check_object_size(from, n, true);
8696+
8697 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8698 }
8699
8700@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8701 return __copy_to_user_inatomic(to, from, size);
8702 }
8703
8704+#ifndef __powerpc64__
8705+
8706+static inline unsigned long __must_check copy_from_user(void *to,
8707+ const void __user *from, unsigned long n)
8708+{
8709+ unsigned long over;
8710+
8711+ if ((long)n < 0)
8712+ return n;
8713+
8714+ if (access_ok(VERIFY_READ, from, n)) {
8715+ if (!__builtin_constant_p(n))
8716+ check_object_size(to, n, false);
8717+ return __copy_tofrom_user((__force void __user *)to, from, n);
8718+ }
8719+ if ((unsigned long)from < TASK_SIZE) {
8720+ over = (unsigned long)from + n - TASK_SIZE;
8721+ if (!__builtin_constant_p(n - over))
8722+ check_object_size(to, n - over, false);
8723+ return __copy_tofrom_user((__force void __user *)to, from,
8724+ n - over) + over;
8725+ }
8726+ return n;
8727+}
8728+
8729+static inline unsigned long __must_check copy_to_user(void __user *to,
8730+ const void *from, unsigned long n)
8731+{
8732+ unsigned long over;
8733+
8734+ if ((long)n < 0)
8735+ return n;
8736+
8737+ if (access_ok(VERIFY_WRITE, to, n)) {
8738+ if (!__builtin_constant_p(n))
8739+ check_object_size(from, n, true);
8740+ return __copy_tofrom_user(to, (__force void __user *)from, n);
8741+ }
8742+ if ((unsigned long)to < TASK_SIZE) {
8743+ over = (unsigned long)to + n - TASK_SIZE;
8744+ if (!__builtin_constant_p(n))
8745+ check_object_size(from, n - over, true);
8746+ return __copy_tofrom_user(to, (__force void __user *)from,
8747+ n - over) + over;
8748+ }
8749+ return n;
8750+}
8751+
8752+#else /* __powerpc64__ */
8753+
8754+#define __copy_in_user(to, from, size) \
8755+ __copy_tofrom_user((to), (from), (size))
8756+
8757+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8758+{
8759+ if ((long)n < 0 || n > INT_MAX)
8760+ return n;
8761+
8762+ if (!__builtin_constant_p(n))
8763+ check_object_size(to, n, false);
8764+
8765+ if (likely(access_ok(VERIFY_READ, from, n)))
8766+ n = __copy_from_user(to, from, n);
8767+ else
8768+ memset(to, 0, n);
8769+ return n;
8770+}
8771+
8772+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8773+{
8774+ if ((long)n < 0 || n > INT_MAX)
8775+ return n;
8776+
8777+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
8778+ if (!__builtin_constant_p(n))
8779+ check_object_size(from, n, true);
8780+ n = __copy_to_user(to, from, n);
8781+ }
8782+ return n;
8783+}
8784+
8785+extern unsigned long copy_in_user(void __user *to, const void __user *from,
8786+ unsigned long n);
8787+
8788+#endif /* __powerpc64__ */
8789+
8790 extern unsigned long __clear_user(void __user *addr, unsigned long size);
8791
8792 static inline unsigned long clear_user(void __user *addr, unsigned long size)
8793diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
8794index 502cf69..53936a1 100644
8795--- a/arch/powerpc/kernel/Makefile
8796+++ b/arch/powerpc/kernel/Makefile
8797@@ -15,6 +15,11 @@ CFLAGS_prom_init.o += -fPIC
8798 CFLAGS_btext.o += -fPIC
8799 endif
8800
8801+CFLAGS_REMOVE_cputable.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8802+CFLAGS_REMOVE_prom_init.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8803+CFLAGS_REMOVE_btext.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8804+CFLAGS_REMOVE_prom.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8805+
8806 ifdef CONFIG_FUNCTION_TRACER
8807 # Do not trace early boot code
8808 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
8809@@ -27,6 +32,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
8810 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
8811 endif
8812
8813+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8814+
8815 obj-y := cputable.o ptrace.o syscalls.o \
8816 irq.o align.o signal_32.o pmc.o vdso.o \
8817 process.o systbl.o idle.o \
8818diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
8819index 3e68d1c..72a5ee6 100644
8820--- a/arch/powerpc/kernel/exceptions-64e.S
8821+++ b/arch/powerpc/kernel/exceptions-64e.S
8822@@ -1010,6 +1010,7 @@ storage_fault_common:
8823 std r14,_DAR(r1)
8824 std r15,_DSISR(r1)
8825 addi r3,r1,STACK_FRAME_OVERHEAD
8826+ bl save_nvgprs
8827 mr r4,r14
8828 mr r5,r15
8829 ld r14,PACA_EXGEN+EX_R14(r13)
8830@@ -1018,8 +1019,7 @@ storage_fault_common:
8831 cmpdi r3,0
8832 bne- 1f
8833 b ret_from_except_lite
8834-1: bl save_nvgprs
8835- mr r5,r3
8836+1: mr r5,r3
8837 addi r3,r1,STACK_FRAME_OVERHEAD
8838 ld r4,_DAR(r1)
8839 bl bad_page_fault
8840diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8841index 9519e6b..13f6c38 100644
8842--- a/arch/powerpc/kernel/exceptions-64s.S
8843+++ b/arch/powerpc/kernel/exceptions-64s.S
8844@@ -1599,10 +1599,10 @@ handle_page_fault:
8845 11: ld r4,_DAR(r1)
8846 ld r5,_DSISR(r1)
8847 addi r3,r1,STACK_FRAME_OVERHEAD
8848+ bl save_nvgprs
8849 bl do_page_fault
8850 cmpdi r3,0
8851 beq+ 12f
8852- bl save_nvgprs
8853 mr r5,r3
8854 addi r3,r1,STACK_FRAME_OVERHEAD
8855 lwz r4,_DAR(r1)
8856diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
8857index 4509603..cdb491f 100644
8858--- a/arch/powerpc/kernel/irq.c
8859+++ b/arch/powerpc/kernel/irq.c
8860@@ -460,6 +460,8 @@ void migrate_irqs(void)
8861 }
8862 #endif
8863
8864+extern void gr_handle_kernel_exploit(void);
8865+
8866 static inline void check_stack_overflow(void)
8867 {
8868 #ifdef CONFIG_DEBUG_STACKOVERFLOW
8869@@ -472,6 +474,7 @@ static inline void check_stack_overflow(void)
8870 pr_err("do_IRQ: stack overflow: %ld\n",
8871 sp - sizeof(struct thread_info));
8872 dump_stack();
8873+ gr_handle_kernel_exploit();
8874 }
8875 #endif
8876 }
8877diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8878index c94d2e0..992a9ce 100644
8879--- a/arch/powerpc/kernel/module_32.c
8880+++ b/arch/powerpc/kernel/module_32.c
8881@@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8882 me->arch.core_plt_section = i;
8883 }
8884 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8885- pr_err("Module doesn't contain .plt or .init.plt sections.\n");
8886+ pr_err("Module $s doesn't contain .plt or .init.plt sections.\n", me->name);
8887 return -ENOEXEC;
8888 }
8889
8890@@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location,
8891
8892 pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8893 /* Init, or core PLT? */
8894- if (location >= mod->module_core
8895- && location < mod->module_core + mod->core_size)
8896+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8897+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8898 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8899- else
8900+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8901+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8902 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8903+ else {
8904+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8905+ return ~0UL;
8906+ }
8907
8908 /* Find this entry, or if that fails, the next avail. entry */
8909 while (entry->jump[0]) {
8910@@ -296,7 +301,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
8911 }
8912 #ifdef CONFIG_DYNAMIC_FTRACE
8913 module->arch.tramp =
8914- do_plt_call(module->module_core,
8915+ do_plt_call(module->module_core_rx,
8916 (unsigned long)ftrace_caller,
8917 sechdrs, module);
8918 #endif
8919diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8920index b4cc7be..1fe8bb3 100644
8921--- a/arch/powerpc/kernel/process.c
8922+++ b/arch/powerpc/kernel/process.c
8923@@ -1036,8 +1036,8 @@ void show_regs(struct pt_regs * regs)
8924 * Lookup NIP late so we have the best change of getting the
8925 * above info out without failing
8926 */
8927- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
8928- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
8929+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
8930+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
8931 #endif
8932 show_stack(current, (unsigned long *) regs->gpr[1]);
8933 if (!user_mode(regs))
8934@@ -1549,10 +1549,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8935 newsp = stack[0];
8936 ip = stack[STACK_FRAME_LR_SAVE];
8937 if (!firstframe || ip != lr) {
8938- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
8939+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
8940 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8941 if ((ip == rth) && curr_frame >= 0) {
8942- printk(" (%pS)",
8943+ printk(" (%pA)",
8944 (void *)current->ret_stack[curr_frame].ret);
8945 curr_frame--;
8946 }
8947@@ -1572,7 +1572,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8948 struct pt_regs *regs = (struct pt_regs *)
8949 (sp + STACK_FRAME_OVERHEAD);
8950 lr = regs->link;
8951- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
8952+ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
8953 regs->trap, (void *)regs->nip, (void *)lr);
8954 firstframe = 1;
8955 }
8956@@ -1608,49 +1608,3 @@ void notrace __ppc64_runlatch_off(void)
8957 mtspr(SPRN_CTRLT, ctrl);
8958 }
8959 #endif /* CONFIG_PPC64 */
8960-
8961-unsigned long arch_align_stack(unsigned long sp)
8962-{
8963- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8964- sp -= get_random_int() & ~PAGE_MASK;
8965- return sp & ~0xf;
8966-}
8967-
8968-static inline unsigned long brk_rnd(void)
8969-{
8970- unsigned long rnd = 0;
8971-
8972- /* 8MB for 32bit, 1GB for 64bit */
8973- if (is_32bit_task())
8974- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
8975- else
8976- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
8977-
8978- return rnd << PAGE_SHIFT;
8979-}
8980-
8981-unsigned long arch_randomize_brk(struct mm_struct *mm)
8982-{
8983- unsigned long base = mm->brk;
8984- unsigned long ret;
8985-
8986-#ifdef CONFIG_PPC_STD_MMU_64
8987- /*
8988- * If we are using 1TB segments and we are allowed to randomise
8989- * the heap, we can put it above 1TB so it is backed by a 1TB
8990- * segment. Otherwise the heap will be in the bottom 1TB
8991- * which always uses 256MB segments and this may result in a
8992- * performance penalty.
8993- */
8994- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
8995- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
8996-#endif
8997-
8998- ret = PAGE_ALIGN(base + brk_rnd());
8999-
9000- if (ret < mm->brk)
9001- return mm->brk;
9002-
9003- return ret;
9004-}
9005-
9006diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9007index f21897b..28c0428 100644
9008--- a/arch/powerpc/kernel/ptrace.c
9009+++ b/arch/powerpc/kernel/ptrace.c
9010@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9011 return ret;
9012 }
9013
9014+#ifdef CONFIG_GRKERNSEC_SETXID
9015+extern void gr_delayed_cred_worker(void);
9016+#endif
9017+
9018 /*
9019 * We must return the syscall number to actually look up in the table.
9020 * This can be -1L to skip running any syscall at all.
9021@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9022
9023 secure_computing_strict(regs->gpr[0]);
9024
9025+#ifdef CONFIG_GRKERNSEC_SETXID
9026+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9027+ gr_delayed_cred_worker();
9028+#endif
9029+
9030 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9031 tracehook_report_syscall_entry(regs))
9032 /*
9033@@ -1805,6 +1814,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9034 {
9035 int step;
9036
9037+#ifdef CONFIG_GRKERNSEC_SETXID
9038+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9039+ gr_delayed_cred_worker();
9040+#endif
9041+
9042 audit_syscall_exit(regs);
9043
9044 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9045diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9046index d3a831a..3a33123 100644
9047--- a/arch/powerpc/kernel/signal_32.c
9048+++ b/arch/powerpc/kernel/signal_32.c
9049@@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
9050 /* Save user registers on the stack */
9051 frame = &rt_sf->uc.uc_mcontext;
9052 addr = frame;
9053- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9054+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9055 sigret = 0;
9056 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9057 } else {
9058diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9059index c7c24d2..1bf7039 100644
9060--- a/arch/powerpc/kernel/signal_64.c
9061+++ b/arch/powerpc/kernel/signal_64.c
9062@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
9063 current->thread.fp_state.fpscr = 0;
9064
9065 /* Set up to return from userspace. */
9066- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9067+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9068 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9069 } else {
9070 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9071diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9072index 19e4744..28a8d7b 100644
9073--- a/arch/powerpc/kernel/traps.c
9074+++ b/arch/powerpc/kernel/traps.c
9075@@ -36,6 +36,7 @@
9076 #include <linux/debugfs.h>
9077 #include <linux/ratelimit.h>
9078 #include <linux/context_tracking.h>
9079+#include <linux/uaccess.h>
9080
9081 #include <asm/emulated_ops.h>
9082 #include <asm/pgtable.h>
9083@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9084 return flags;
9085 }
9086
9087+extern void gr_handle_kernel_exploit(void);
9088+
9089 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9090 int signr)
9091 {
9092@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9093 panic("Fatal exception in interrupt");
9094 if (panic_on_oops)
9095 panic("Fatal exception");
9096+
9097+ gr_handle_kernel_exploit();
9098+
9099 do_exit(signr);
9100 }
9101
9102@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
9103 enum ctx_state prev_state = exception_enter();
9104 unsigned int reason = get_reason(regs);
9105
9106+#ifdef CONFIG_PAX_REFCOUNT
9107+ unsigned int bkpt;
9108+ const struct exception_table_entry *entry;
9109+
9110+ if (reason & REASON_ILLEGAL) {
9111+ /* Check if PaX bad instruction */
9112+ if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
9113+ current->thread.trap_nr = 0;
9114+ pax_report_refcount_overflow(regs);
9115+ /* fixup_exception() for PowerPC does not exist, simulate its job */
9116+ if ((entry = search_exception_tables(regs->nip)) != NULL) {
9117+ regs->nip = entry->fixup;
9118+ return;
9119+ }
9120+ /* fixup_exception() could not handle */
9121+ goto bail;
9122+ }
9123+ }
9124+#endif
9125+
9126 /* We can now get here via a FP Unavailable exception if the core
9127 * has no FPU, in that case the reason flags will be 0 */
9128
9129diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9130index 305eb0d..accc5b40 100644
9131--- a/arch/powerpc/kernel/vdso.c
9132+++ b/arch/powerpc/kernel/vdso.c
9133@@ -34,6 +34,7 @@
9134 #include <asm/vdso.h>
9135 #include <asm/vdso_datapage.h>
9136 #include <asm/setup.h>
9137+#include <asm/mman.h>
9138
9139 #undef DEBUG
9140
9141@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9142 vdso_base = VDSO32_MBASE;
9143 #endif
9144
9145- current->mm->context.vdso_base = 0;
9146+ current->mm->context.vdso_base = ~0UL;
9147
9148 /* vDSO has a problem and was disabled, just don't "enable" it for the
9149 * process
9150@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9151 vdso_base = get_unmapped_area(NULL, vdso_base,
9152 (vdso_pages << PAGE_SHIFT) +
9153 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9154- 0, 0);
9155+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9156 if (IS_ERR_VALUE(vdso_base)) {
9157 rc = vdso_base;
9158 goto fail_mmapsem;
9159diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9160index 27c0fac..6ec4a32 100644
9161--- a/arch/powerpc/kvm/powerpc.c
9162+++ b/arch/powerpc/kvm/powerpc.c
9163@@ -1402,7 +1402,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9164 }
9165 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9166
9167-int kvm_arch_init(void *opaque)
9168+int kvm_arch_init(const void *opaque)
9169 {
9170 return 0;
9171 }
9172diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9173index 5eea6f3..5d10396 100644
9174--- a/arch/powerpc/lib/usercopy_64.c
9175+++ b/arch/powerpc/lib/usercopy_64.c
9176@@ -9,22 +9,6 @@
9177 #include <linux/module.h>
9178 #include <asm/uaccess.h>
9179
9180-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9181-{
9182- if (likely(access_ok(VERIFY_READ, from, n)))
9183- n = __copy_from_user(to, from, n);
9184- else
9185- memset(to, 0, n);
9186- return n;
9187-}
9188-
9189-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9190-{
9191- if (likely(access_ok(VERIFY_WRITE, to, n)))
9192- n = __copy_to_user(to, from, n);
9193- return n;
9194-}
9195-
9196 unsigned long copy_in_user(void __user *to, const void __user *from,
9197 unsigned long n)
9198 {
9199@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9200 return n;
9201 }
9202
9203-EXPORT_SYMBOL(copy_from_user);
9204-EXPORT_SYMBOL(copy_to_user);
9205 EXPORT_SYMBOL(copy_in_user);
9206
9207diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9208index b396868..3eb6b9f 100644
9209--- a/arch/powerpc/mm/fault.c
9210+++ b/arch/powerpc/mm/fault.c
9211@@ -33,6 +33,10 @@
9212 #include <linux/ratelimit.h>
9213 #include <linux/context_tracking.h>
9214 #include <linux/hugetlb.h>
9215+#include <linux/slab.h>
9216+#include <linux/pagemap.h>
9217+#include <linux/compiler.h>
9218+#include <linux/unistd.h>
9219
9220 #include <asm/firmware.h>
9221 #include <asm/page.h>
9222@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9223 }
9224 #endif
9225
9226+#ifdef CONFIG_PAX_PAGEEXEC
9227+/*
9228+ * PaX: decide what to do with offenders (regs->nip = fault address)
9229+ *
9230+ * returns 1 when task should be killed
9231+ */
9232+static int pax_handle_fetch_fault(struct pt_regs *regs)
9233+{
9234+ return 1;
9235+}
9236+
9237+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9238+{
9239+ unsigned long i;
9240+
9241+ printk(KERN_ERR "PAX: bytes at PC: ");
9242+ for (i = 0; i < 5; i++) {
9243+ unsigned int c;
9244+ if (get_user(c, (unsigned int __user *)pc+i))
9245+ printk(KERN_CONT "???????? ");
9246+ else
9247+ printk(KERN_CONT "%08x ", c);
9248+ }
9249+ printk("\n");
9250+}
9251+#endif
9252+
9253 /*
9254 * Check whether the instruction at regs->nip is a store using
9255 * an update addressing form which will update r1.
9256@@ -227,7 +258,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9257 * indicate errors in DSISR but can validly be set in SRR1.
9258 */
9259 if (trap == 0x400)
9260- error_code &= 0x48200000;
9261+ error_code &= 0x58200000;
9262 else
9263 is_write = error_code & DSISR_ISSTORE;
9264 #else
9265@@ -383,12 +414,16 @@ good_area:
9266 * "undefined". Of those that can be set, this is the only
9267 * one which seems bad.
9268 */
9269- if (error_code & 0x10000000)
9270+ if (error_code & DSISR_GUARDED)
9271 /* Guarded storage error. */
9272 goto bad_area;
9273 #endif /* CONFIG_8xx */
9274
9275 if (is_exec) {
9276+#ifdef CONFIG_PPC_STD_MMU
9277+ if (error_code & DSISR_GUARDED)
9278+ goto bad_area;
9279+#endif
9280 /*
9281 * Allow execution from readable areas if the MMU does not
9282 * provide separate controls over reading and executing.
9283@@ -483,6 +518,23 @@ bad_area:
9284 bad_area_nosemaphore:
9285 /* User mode accesses cause a SIGSEGV */
9286 if (user_mode(regs)) {
9287+
9288+#ifdef CONFIG_PAX_PAGEEXEC
9289+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9290+#ifdef CONFIG_PPC_STD_MMU
9291+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9292+#else
9293+ if (is_exec && regs->nip == address) {
9294+#endif
9295+ switch (pax_handle_fetch_fault(regs)) {
9296+ }
9297+
9298+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9299+ do_group_exit(SIGKILL);
9300+ }
9301+ }
9302+#endif
9303+
9304 _exception(SIGSEGV, regs, code, address);
9305 goto bail;
9306 }
9307diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9308index cb8bdbe..cde4bc7 100644
9309--- a/arch/powerpc/mm/mmap.c
9310+++ b/arch/powerpc/mm/mmap.c
9311@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9312 return sysctl_legacy_va_layout;
9313 }
9314
9315-static unsigned long mmap_rnd(void)
9316+static unsigned long mmap_rnd(struct mm_struct *mm)
9317 {
9318 unsigned long rnd = 0;
9319
9320+#ifdef CONFIG_PAX_RANDMMAP
9321+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9322+#endif
9323+
9324 if (current->flags & PF_RANDOMIZE) {
9325 /* 8MB for 32bit, 1GB for 64bit */
9326 if (is_32bit_task())
9327@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9328 return rnd << PAGE_SHIFT;
9329 }
9330
9331-static inline unsigned long mmap_base(void)
9332+static inline unsigned long mmap_base(struct mm_struct *mm)
9333 {
9334 unsigned long gap = rlimit(RLIMIT_STACK);
9335
9336@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9337 else if (gap > MAX_GAP)
9338 gap = MAX_GAP;
9339
9340- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9341+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9342 }
9343
9344 /*
9345@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9346 */
9347 if (mmap_is_legacy()) {
9348 mm->mmap_base = TASK_UNMAPPED_BASE;
9349+
9350+#ifdef CONFIG_PAX_RANDMMAP
9351+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9352+ mm->mmap_base += mm->delta_mmap;
9353+#endif
9354+
9355 mm->get_unmapped_area = arch_get_unmapped_area;
9356 } else {
9357- mm->mmap_base = mmap_base();
9358+ mm->mmap_base = mmap_base(mm);
9359+
9360+#ifdef CONFIG_PAX_RANDMMAP
9361+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9362+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9363+#endif
9364+
9365 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9366 }
9367 }
9368diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9369index 0f432a7..abfe841 100644
9370--- a/arch/powerpc/mm/slice.c
9371+++ b/arch/powerpc/mm/slice.c
9372@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9373 if ((mm->task_size - len) < addr)
9374 return 0;
9375 vma = find_vma(mm, addr);
9376- return (!vma || (addr + len) <= vma->vm_start);
9377+ return check_heap_stack_gap(vma, addr, len, 0);
9378 }
9379
9380 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9381@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9382 info.align_offset = 0;
9383
9384 addr = TASK_UNMAPPED_BASE;
9385+
9386+#ifdef CONFIG_PAX_RANDMMAP
9387+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9388+ addr += mm->delta_mmap;
9389+#endif
9390+
9391 while (addr < TASK_SIZE) {
9392 info.low_limit = addr;
9393 if (!slice_scan_available(addr, available, 1, &addr))
9394@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9395 if (fixed && addr > (mm->task_size - len))
9396 return -ENOMEM;
9397
9398+#ifdef CONFIG_PAX_RANDMMAP
9399+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9400+ addr = 0;
9401+#endif
9402+
9403 /* If hint, make sure it matches our alignment restrictions */
9404 if (!fixed && addr) {
9405 addr = _ALIGN_UP(addr, 1ul << pshift);
9406diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9407index d966bbe..372124a 100644
9408--- a/arch/powerpc/platforms/cell/spufs/file.c
9409+++ b/arch/powerpc/platforms/cell/spufs/file.c
9410@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9411 return VM_FAULT_NOPAGE;
9412 }
9413
9414-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9415+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9416 unsigned long address,
9417- void *buf, int len, int write)
9418+ void *buf, size_t len, int write)
9419 {
9420 struct spu_context *ctx = vma->vm_file->private_data;
9421 unsigned long offset = address - vma->vm_start;
9422diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9423index fa934fe..c296056 100644
9424--- a/arch/s390/include/asm/atomic.h
9425+++ b/arch/s390/include/asm/atomic.h
9426@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9427 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9428 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9429
9430+#define atomic64_read_unchecked(v) atomic64_read(v)
9431+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9432+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9433+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9434+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9435+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9436+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9437+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9438+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9439+
9440 #endif /* __ARCH_S390_ATOMIC__ */
9441diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
9442index 8d72471..5322500 100644
9443--- a/arch/s390/include/asm/barrier.h
9444+++ b/arch/s390/include/asm/barrier.h
9445@@ -42,7 +42,7 @@
9446 do { \
9447 compiletime_assert_atomic_type(*p); \
9448 barrier(); \
9449- ACCESS_ONCE(*p) = (v); \
9450+ ACCESS_ONCE_RW(*p) = (v); \
9451 } while (0)
9452
9453 #define smp_load_acquire(p) \
9454diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9455index 4d7ccac..d03d0ad 100644
9456--- a/arch/s390/include/asm/cache.h
9457+++ b/arch/s390/include/asm/cache.h
9458@@ -9,8 +9,10 @@
9459 #ifndef __ARCH_S390_CACHE_H
9460 #define __ARCH_S390_CACHE_H
9461
9462-#define L1_CACHE_BYTES 256
9463+#include <linux/const.h>
9464+
9465 #define L1_CACHE_SHIFT 8
9466+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9467 #define NET_SKB_PAD 32
9468
9469 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9470diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9471index c9c875d..b4b0e4c 100644
9472--- a/arch/s390/include/asm/elf.h
9473+++ b/arch/s390/include/asm/elf.h
9474@@ -163,8 +163,14 @@ extern unsigned int vdso_enabled;
9475 the loader. We need to make sure that it is out of the way of the program
9476 that it will "exec", and that there is sufficient room for the brk. */
9477
9478-extern unsigned long randomize_et_dyn(void);
9479-#define ELF_ET_DYN_BASE randomize_et_dyn()
9480+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
9481+
9482+#ifdef CONFIG_PAX_ASLR
9483+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9484+
9485+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9486+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9487+#endif
9488
9489 /* This yields a mask that user programs can use to figure out what
9490 instruction set this CPU supports. */
9491@@ -225,9 +231,6 @@ struct linux_binprm;
9492 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
9493 int arch_setup_additional_pages(struct linux_binprm *, int);
9494
9495-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9496-#define arch_randomize_brk arch_randomize_brk
9497-
9498 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
9499
9500 #endif
9501diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9502index c4a93d6..4d2a9b4 100644
9503--- a/arch/s390/include/asm/exec.h
9504+++ b/arch/s390/include/asm/exec.h
9505@@ -7,6 +7,6 @@
9506 #ifndef __ASM_EXEC_H
9507 #define __ASM_EXEC_H
9508
9509-extern unsigned long arch_align_stack(unsigned long sp);
9510+#define arch_align_stack(x) ((x) & ~0xfUL)
9511
9512 #endif /* __ASM_EXEC_H */
9513diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9514index cd4c68e..6764641 100644
9515--- a/arch/s390/include/asm/uaccess.h
9516+++ b/arch/s390/include/asm/uaccess.h
9517@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9518 __range_ok((unsigned long)(addr), (size)); \
9519 })
9520
9521+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9522 #define access_ok(type, addr, size) __access_ok(addr, size)
9523
9524 /*
9525@@ -275,6 +276,10 @@ static inline unsigned long __must_check
9526 copy_to_user(void __user *to, const void *from, unsigned long n)
9527 {
9528 might_fault();
9529+
9530+ if ((long)n < 0)
9531+ return n;
9532+
9533 return __copy_to_user(to, from, n);
9534 }
9535
9536@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9537 static inline unsigned long __must_check
9538 copy_from_user(void *to, const void __user *from, unsigned long n)
9539 {
9540- unsigned int sz = __compiletime_object_size(to);
9541+ size_t sz = __compiletime_object_size(to);
9542
9543 might_fault();
9544- if (unlikely(sz != -1 && sz < n)) {
9545+
9546+ if ((long)n < 0)
9547+ return n;
9548+
9549+ if (unlikely(sz != (size_t)-1 && sz < n)) {
9550 copy_from_user_overflow();
9551 return n;
9552 }
9553diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9554index 2ca9586..55682a9 100644
9555--- a/arch/s390/kernel/module.c
9556+++ b/arch/s390/kernel/module.c
9557@@ -165,11 +165,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9558
9559 /* Increase core size by size of got & plt and set start
9560 offsets for got and plt. */
9561- me->core_size = ALIGN(me->core_size, 4);
9562- me->arch.got_offset = me->core_size;
9563- me->core_size += me->arch.got_size;
9564- me->arch.plt_offset = me->core_size;
9565- me->core_size += me->arch.plt_size;
9566+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
9567+ me->arch.got_offset = me->core_size_rw;
9568+ me->core_size_rw += me->arch.got_size;
9569+ me->arch.plt_offset = me->core_size_rx;
9570+ me->core_size_rx += me->arch.plt_size;
9571 return 0;
9572 }
9573
9574@@ -285,7 +285,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9575 if (info->got_initialized == 0) {
9576 Elf_Addr *gotent;
9577
9578- gotent = me->module_core + me->arch.got_offset +
9579+ gotent = me->module_core_rw + me->arch.got_offset +
9580 info->got_offset;
9581 *gotent = val;
9582 info->got_initialized = 1;
9583@@ -308,7 +308,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9584 rc = apply_rela_bits(loc, val, 0, 64, 0);
9585 else if (r_type == R_390_GOTENT ||
9586 r_type == R_390_GOTPLTENT) {
9587- val += (Elf_Addr) me->module_core - loc;
9588+ val += (Elf_Addr) me->module_core_rw - loc;
9589 rc = apply_rela_bits(loc, val, 1, 32, 1);
9590 }
9591 break;
9592@@ -321,7 +321,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9593 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9594 if (info->plt_initialized == 0) {
9595 unsigned int *ip;
9596- ip = me->module_core + me->arch.plt_offset +
9597+ ip = me->module_core_rx + me->arch.plt_offset +
9598 info->plt_offset;
9599 #ifndef CONFIG_64BIT
9600 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
9601@@ -346,7 +346,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9602 val - loc + 0xffffUL < 0x1ffffeUL) ||
9603 (r_type == R_390_PLT32DBL &&
9604 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9605- val = (Elf_Addr) me->module_core +
9606+ val = (Elf_Addr) me->module_core_rx +
9607 me->arch.plt_offset +
9608 info->plt_offset;
9609 val += rela->r_addend - loc;
9610@@ -368,7 +368,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9611 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9612 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9613 val = val + rela->r_addend -
9614- ((Elf_Addr) me->module_core + me->arch.got_offset);
9615+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9616 if (r_type == R_390_GOTOFF16)
9617 rc = apply_rela_bits(loc, val, 0, 16, 0);
9618 else if (r_type == R_390_GOTOFF32)
9619@@ -378,7 +378,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9620 break;
9621 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9622 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9623- val = (Elf_Addr) me->module_core + me->arch.got_offset +
9624+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9625 rela->r_addend - loc;
9626 if (r_type == R_390_GOTPC)
9627 rc = apply_rela_bits(loc, val, 1, 32, 0);
9628diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9629index 13fc097..84d375f 100644
9630--- a/arch/s390/kernel/process.c
9631+++ b/arch/s390/kernel/process.c
9632@@ -227,27 +227,3 @@ unsigned long get_wchan(struct task_struct *p)
9633 }
9634 return 0;
9635 }
9636-
9637-unsigned long arch_align_stack(unsigned long sp)
9638-{
9639- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9640- sp -= get_random_int() & ~PAGE_MASK;
9641- return sp & ~0xf;
9642-}
9643-
9644-static inline unsigned long brk_rnd(void)
9645-{
9646- /* 8MB for 32bit, 1GB for 64bit */
9647- if (is_32bit_task())
9648- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9649- else
9650- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9651-}
9652-
9653-unsigned long arch_randomize_brk(struct mm_struct *mm)
9654-{
9655- unsigned long ret;
9656-
9657- ret = PAGE_ALIGN(mm->brk + brk_rnd());
9658- return (ret > mm->brk) ? ret : mm->brk;
9659-}
9660diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9661index 179a2c2..4ba9137 100644
9662--- a/arch/s390/mm/mmap.c
9663+++ b/arch/s390/mm/mmap.c
9664@@ -62,6 +62,12 @@ static inline int mmap_is_legacy(void)
9665
9666 static unsigned long mmap_rnd(void)
9667 {
9668+
9669+#ifdef CONFIG_PAX_RANDMMAP
9670+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
9671+ return 0;
9672+#endif
9673+
9674 if (!(current->flags & PF_RANDOMIZE))
9675 return 0;
9676 if (is_32bit_task())
9677@@ -204,9 +210,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9678 */
9679 if (mmap_is_legacy()) {
9680 mm->mmap_base = mmap_base_legacy();
9681+
9682+#ifdef CONFIG_PAX_RANDMMAP
9683+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9684+ mm->mmap_base += mm->delta_mmap;
9685+#endif
9686+
9687 mm->get_unmapped_area = arch_get_unmapped_area;
9688 } else {
9689 mm->mmap_base = mmap_base();
9690+
9691+#ifdef CONFIG_PAX_RANDMMAP
9692+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9693+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9694+#endif
9695+
9696 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9697 }
9698 }
9699@@ -279,9 +297,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9700 */
9701 if (mmap_is_legacy()) {
9702 mm->mmap_base = mmap_base_legacy();
9703+
9704+#ifdef CONFIG_PAX_RANDMMAP
9705+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9706+ mm->mmap_base += mm->delta_mmap;
9707+#endif
9708+
9709 mm->get_unmapped_area = s390_get_unmapped_area;
9710 } else {
9711 mm->mmap_base = mmap_base();
9712+
9713+#ifdef CONFIG_PAX_RANDMMAP
9714+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9715+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9716+#endif
9717+
9718 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9719 }
9720 }
9721diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9722index ae3d59f..f65f075 100644
9723--- a/arch/score/include/asm/cache.h
9724+++ b/arch/score/include/asm/cache.h
9725@@ -1,7 +1,9 @@
9726 #ifndef _ASM_SCORE_CACHE_H
9727 #define _ASM_SCORE_CACHE_H
9728
9729+#include <linux/const.h>
9730+
9731 #define L1_CACHE_SHIFT 4
9732-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9733+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9734
9735 #endif /* _ASM_SCORE_CACHE_H */
9736diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9737index f9f3cd5..58ff438 100644
9738--- a/arch/score/include/asm/exec.h
9739+++ b/arch/score/include/asm/exec.h
9740@@ -1,6 +1,6 @@
9741 #ifndef _ASM_SCORE_EXEC_H
9742 #define _ASM_SCORE_EXEC_H
9743
9744-extern unsigned long arch_align_stack(unsigned long sp);
9745+#define arch_align_stack(x) (x)
9746
9747 #endif /* _ASM_SCORE_EXEC_H */
9748diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9749index a1519ad3..e8ac1ff 100644
9750--- a/arch/score/kernel/process.c
9751+++ b/arch/score/kernel/process.c
9752@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9753
9754 return task_pt_regs(task)->cp0_epc;
9755 }
9756-
9757-unsigned long arch_align_stack(unsigned long sp)
9758-{
9759- return sp;
9760-}
9761diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9762index ef9e555..331bd29 100644
9763--- a/arch/sh/include/asm/cache.h
9764+++ b/arch/sh/include/asm/cache.h
9765@@ -9,10 +9,11 @@
9766 #define __ASM_SH_CACHE_H
9767 #ifdef __KERNEL__
9768
9769+#include <linux/const.h>
9770 #include <linux/init.h>
9771 #include <cpu/cache.h>
9772
9773-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9774+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9775
9776 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9777
9778diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9779index 6777177..cb5e44f 100644
9780--- a/arch/sh/mm/mmap.c
9781+++ b/arch/sh/mm/mmap.c
9782@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9783 struct mm_struct *mm = current->mm;
9784 struct vm_area_struct *vma;
9785 int do_colour_align;
9786+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9787 struct vm_unmapped_area_info info;
9788
9789 if (flags & MAP_FIXED) {
9790@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9791 if (filp || (flags & MAP_SHARED))
9792 do_colour_align = 1;
9793
9794+#ifdef CONFIG_PAX_RANDMMAP
9795+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9796+#endif
9797+
9798 if (addr) {
9799 if (do_colour_align)
9800 addr = COLOUR_ALIGN(addr, pgoff);
9801@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9802 addr = PAGE_ALIGN(addr);
9803
9804 vma = find_vma(mm, addr);
9805- if (TASK_SIZE - len >= addr &&
9806- (!vma || addr + len <= vma->vm_start))
9807+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9808 return addr;
9809 }
9810
9811 info.flags = 0;
9812 info.length = len;
9813- info.low_limit = TASK_UNMAPPED_BASE;
9814+ info.low_limit = mm->mmap_base;
9815 info.high_limit = TASK_SIZE;
9816 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
9817 info.align_offset = pgoff << PAGE_SHIFT;
9818@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9819 struct mm_struct *mm = current->mm;
9820 unsigned long addr = addr0;
9821 int do_colour_align;
9822+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9823 struct vm_unmapped_area_info info;
9824
9825 if (flags & MAP_FIXED) {
9826@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9827 if (filp || (flags & MAP_SHARED))
9828 do_colour_align = 1;
9829
9830+#ifdef CONFIG_PAX_RANDMMAP
9831+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9832+#endif
9833+
9834 /* requesting a specific address */
9835 if (addr) {
9836 if (do_colour_align)
9837@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9838 addr = PAGE_ALIGN(addr);
9839
9840 vma = find_vma(mm, addr);
9841- if (TASK_SIZE - len >= addr &&
9842- (!vma || addr + len <= vma->vm_start))
9843+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9844 return addr;
9845 }
9846
9847@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9848 VM_BUG_ON(addr != -ENOMEM);
9849 info.flags = 0;
9850 info.low_limit = TASK_UNMAPPED_BASE;
9851+
9852+#ifdef CONFIG_PAX_RANDMMAP
9853+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9854+ info.low_limit += mm->delta_mmap;
9855+#endif
9856+
9857 info.high_limit = TASK_SIZE;
9858 addr = vm_unmapped_area(&info);
9859 }
9860diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
9861index 4082749..fd97781 100644
9862--- a/arch/sparc/include/asm/atomic_64.h
9863+++ b/arch/sparc/include/asm/atomic_64.h
9864@@ -15,18 +15,38 @@
9865 #define ATOMIC64_INIT(i) { (i) }
9866
9867 #define atomic_read(v) ACCESS_ONCE((v)->counter)
9868+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9869+{
9870+ return ACCESS_ONCE(v->counter);
9871+}
9872 #define atomic64_read(v) ACCESS_ONCE((v)->counter)
9873+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9874+{
9875+ return ACCESS_ONCE(v->counter);
9876+}
9877
9878 #define atomic_set(v, i) (((v)->counter) = i)
9879+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9880+{
9881+ v->counter = i;
9882+}
9883 #define atomic64_set(v, i) (((v)->counter) = i)
9884+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9885+{
9886+ v->counter = i;
9887+}
9888
9889-#define ATOMIC_OP(op) \
9890-void atomic_##op(int, atomic_t *); \
9891-void atomic64_##op(long, atomic64_t *);
9892+#define __ATOMIC_OP(op, suffix) \
9893+void atomic_##op##suffix(int, atomic##suffix##_t *); \
9894+void atomic64_##op##suffix(long, atomic64##suffix##_t *);
9895
9896-#define ATOMIC_OP_RETURN(op) \
9897-int atomic_##op##_return(int, atomic_t *); \
9898-long atomic64_##op##_return(long, atomic64_t *);
9899+#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)
9900+
9901+#define __ATOMIC_OP_RETURN(op, suffix) \
9902+int atomic_##op##_return##suffix(int, atomic##suffix##_t *); \
9903+long atomic64_##op##_return##suffix(long, atomic64##suffix##_t *);
9904+
9905+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked)
9906
9907 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
9908
9909@@ -35,13 +55,23 @@ ATOMIC_OPS(sub)
9910
9911 #undef ATOMIC_OPS
9912 #undef ATOMIC_OP_RETURN
9913+#undef __ATOMIC_OP_RETURN
9914 #undef ATOMIC_OP
9915+#undef __ATOMIC_OP
9916
9917 #define atomic_dec_return(v) atomic_sub_return(1, v)
9918 #define atomic64_dec_return(v) atomic64_sub_return(1, v)
9919
9920 #define atomic_inc_return(v) atomic_add_return(1, v)
9921+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9922+{
9923+ return atomic_add_return_unchecked(1, v);
9924+}
9925 #define atomic64_inc_return(v) atomic64_add_return(1, v)
9926+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9927+{
9928+ return atomic64_add_return_unchecked(1, v);
9929+}
9930
9931 /*
9932 * atomic_inc_and_test - increment and test
9933@@ -52,6 +82,10 @@ ATOMIC_OPS(sub)
9934 * other cases.
9935 */
9936 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
9937+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9938+{
9939+ return atomic_inc_return_unchecked(v) == 0;
9940+}
9941 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
9942
9943 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
9944@@ -61,25 +95,60 @@ ATOMIC_OPS(sub)
9945 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
9946
9947 #define atomic_inc(v) atomic_add(1, v)
9948+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9949+{
9950+ atomic_add_unchecked(1, v);
9951+}
9952 #define atomic64_inc(v) atomic64_add(1, v)
9953+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9954+{
9955+ atomic64_add_unchecked(1, v);
9956+}
9957
9958 #define atomic_dec(v) atomic_sub(1, v)
9959+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9960+{
9961+ atomic_sub_unchecked(1, v);
9962+}
9963 #define atomic64_dec(v) atomic64_sub(1, v)
9964+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9965+{
9966+ atomic64_sub_unchecked(1, v);
9967+}
9968
9969 #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
9970 #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
9971
9972 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
9973+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9974+{
9975+ return cmpxchg(&v->counter, old, new);
9976+}
9977 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
9978+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9979+{
9980+ return xchg(&v->counter, new);
9981+}
9982
9983 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9984 {
9985- int c, old;
9986+ int c, old, new;
9987 c = atomic_read(v);
9988 for (;;) {
9989- if (unlikely(c == (u)))
9990+ if (unlikely(c == u))
9991 break;
9992- old = atomic_cmpxchg((v), c, c + (a));
9993+
9994+ asm volatile("addcc %2, %0, %0\n"
9995+
9996+#ifdef CONFIG_PAX_REFCOUNT
9997+ "tvs %%icc, 6\n"
9998+#endif
9999+
10000+ : "=r" (new)
10001+ : "0" (c), "ir" (a)
10002+ : "cc");
10003+
10004+ old = atomic_cmpxchg(v, c, new);
10005 if (likely(old == c))
10006 break;
10007 c = old;
10008@@ -90,20 +159,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10009 #define atomic64_cmpxchg(v, o, n) \
10010 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
10011 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
10012+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10013+{
10014+ return xchg(&v->counter, new);
10015+}
10016
10017 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10018 {
10019- long c, old;
10020+ long c, old, new;
10021 c = atomic64_read(v);
10022 for (;;) {
10023- if (unlikely(c == (u)))
10024+ if (unlikely(c == u))
10025 break;
10026- old = atomic64_cmpxchg((v), c, c + (a));
10027+
10028+ asm volatile("addcc %2, %0, %0\n"
10029+
10030+#ifdef CONFIG_PAX_REFCOUNT
10031+ "tvs %%xcc, 6\n"
10032+#endif
10033+
10034+ : "=r" (new)
10035+ : "0" (c), "ir" (a)
10036+ : "cc");
10037+
10038+ old = atomic64_cmpxchg(v, c, new);
10039 if (likely(old == c))
10040 break;
10041 c = old;
10042 }
10043- return c != (u);
10044+ return c != u;
10045 }
10046
10047 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10048diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10049index 7664894..45a974b 100644
10050--- a/arch/sparc/include/asm/barrier_64.h
10051+++ b/arch/sparc/include/asm/barrier_64.h
10052@@ -60,7 +60,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10053 do { \
10054 compiletime_assert_atomic_type(*p); \
10055 barrier(); \
10056- ACCESS_ONCE(*p) = (v); \
10057+ ACCESS_ONCE_RW(*p) = (v); \
10058 } while (0)
10059
10060 #define smp_load_acquire(p) \
10061diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10062index 5bb6991..5c2132e 100644
10063--- a/arch/sparc/include/asm/cache.h
10064+++ b/arch/sparc/include/asm/cache.h
10065@@ -7,10 +7,12 @@
10066 #ifndef _SPARC_CACHE_H
10067 #define _SPARC_CACHE_H
10068
10069+#include <linux/const.h>
10070+
10071 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10072
10073 #define L1_CACHE_SHIFT 5
10074-#define L1_CACHE_BYTES 32
10075+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10076
10077 #ifdef CONFIG_SPARC32
10078 #define SMP_CACHE_BYTES_SHIFT 5
10079diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10080index a24e41f..47677ff 100644
10081--- a/arch/sparc/include/asm/elf_32.h
10082+++ b/arch/sparc/include/asm/elf_32.h
10083@@ -114,6 +114,13 @@ typedef struct {
10084
10085 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10086
10087+#ifdef CONFIG_PAX_ASLR
10088+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10089+
10090+#define PAX_DELTA_MMAP_LEN 16
10091+#define PAX_DELTA_STACK_LEN 16
10092+#endif
10093+
10094 /* This yields a mask that user programs can use to figure out what
10095 instruction set this cpu supports. This can NOT be done in userspace
10096 on Sparc. */
10097diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10098index 370ca1e..d4f4a98 100644
10099--- a/arch/sparc/include/asm/elf_64.h
10100+++ b/arch/sparc/include/asm/elf_64.h
10101@@ -189,6 +189,13 @@ typedef struct {
10102 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10103 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10104
10105+#ifdef CONFIG_PAX_ASLR
10106+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10107+
10108+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10109+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10110+#endif
10111+
10112 extern unsigned long sparc64_elf_hwcap;
10113 #define ELF_HWCAP sparc64_elf_hwcap
10114
10115diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10116index a3890da..f6a408e 100644
10117--- a/arch/sparc/include/asm/pgalloc_32.h
10118+++ b/arch/sparc/include/asm/pgalloc_32.h
10119@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10120 }
10121
10122 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10123+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10124
10125 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10126 unsigned long address)
10127diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10128index 5e31871..13469c6 100644
10129--- a/arch/sparc/include/asm/pgalloc_64.h
10130+++ b/arch/sparc/include/asm/pgalloc_64.h
10131@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
10132 }
10133
10134 #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
10135+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10136
10137 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
10138 {
10139@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
10140 }
10141
10142 #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
10143+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10144
10145 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
10146 {
10147diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10148index 59ba6f6..4518128 100644
10149--- a/arch/sparc/include/asm/pgtable.h
10150+++ b/arch/sparc/include/asm/pgtable.h
10151@@ -5,4 +5,8 @@
10152 #else
10153 #include <asm/pgtable_32.h>
10154 #endif
10155+
10156+#define ktla_ktva(addr) (addr)
10157+#define ktva_ktla(addr) (addr)
10158+
10159 #endif
10160diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10161index f06b36a..bca3189 100644
10162--- a/arch/sparc/include/asm/pgtable_32.h
10163+++ b/arch/sparc/include/asm/pgtable_32.h
10164@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10165 #define PAGE_SHARED SRMMU_PAGE_SHARED
10166 #define PAGE_COPY SRMMU_PAGE_COPY
10167 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10168+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10169+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10170+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10171 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10172
10173 /* Top-level page directory - dummy used by init-mm.
10174@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10175
10176 /* xwr */
10177 #define __P000 PAGE_NONE
10178-#define __P001 PAGE_READONLY
10179-#define __P010 PAGE_COPY
10180-#define __P011 PAGE_COPY
10181+#define __P001 PAGE_READONLY_NOEXEC
10182+#define __P010 PAGE_COPY_NOEXEC
10183+#define __P011 PAGE_COPY_NOEXEC
10184 #define __P100 PAGE_READONLY
10185 #define __P101 PAGE_READONLY
10186 #define __P110 PAGE_COPY
10187 #define __P111 PAGE_COPY
10188
10189 #define __S000 PAGE_NONE
10190-#define __S001 PAGE_READONLY
10191-#define __S010 PAGE_SHARED
10192-#define __S011 PAGE_SHARED
10193+#define __S001 PAGE_READONLY_NOEXEC
10194+#define __S010 PAGE_SHARED_NOEXEC
10195+#define __S011 PAGE_SHARED_NOEXEC
10196 #define __S100 PAGE_READONLY
10197 #define __S101 PAGE_READONLY
10198 #define __S110 PAGE_SHARED
10199diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10200index ae51a11..eadfd03 100644
10201--- a/arch/sparc/include/asm/pgtsrmmu.h
10202+++ b/arch/sparc/include/asm/pgtsrmmu.h
10203@@ -111,6 +111,11 @@
10204 SRMMU_EXEC | SRMMU_REF)
10205 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10206 SRMMU_EXEC | SRMMU_REF)
10207+
10208+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10209+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10210+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10211+
10212 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10213 SRMMU_DIRTY | SRMMU_REF)
10214
10215diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10216index 29d64b1..4272fe8 100644
10217--- a/arch/sparc/include/asm/setup.h
10218+++ b/arch/sparc/include/asm/setup.h
10219@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10220 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10221
10222 /* init_64.c */
10223-extern atomic_t dcpage_flushes;
10224-extern atomic_t dcpage_flushes_xcall;
10225+extern atomic_unchecked_t dcpage_flushes;
10226+extern atomic_unchecked_t dcpage_flushes_xcall;
10227
10228 extern int sysctl_tsb_ratio;
10229 #endif
10230diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10231index 9689176..63c18ea 100644
10232--- a/arch/sparc/include/asm/spinlock_64.h
10233+++ b/arch/sparc/include/asm/spinlock_64.h
10234@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10235
10236 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10237
10238-static void inline arch_read_lock(arch_rwlock_t *lock)
10239+static inline void arch_read_lock(arch_rwlock_t *lock)
10240 {
10241 unsigned long tmp1, tmp2;
10242
10243 __asm__ __volatile__ (
10244 "1: ldsw [%2], %0\n"
10245 " brlz,pn %0, 2f\n"
10246-"4: add %0, 1, %1\n"
10247+"4: addcc %0, 1, %1\n"
10248+
10249+#ifdef CONFIG_PAX_REFCOUNT
10250+" tvs %%icc, 6\n"
10251+#endif
10252+
10253 " cas [%2], %0, %1\n"
10254 " cmp %0, %1\n"
10255 " bne,pn %%icc, 1b\n"
10256@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10257 " .previous"
10258 : "=&r" (tmp1), "=&r" (tmp2)
10259 : "r" (lock)
10260- : "memory");
10261+ : "memory", "cc");
10262 }
10263
10264-static int inline arch_read_trylock(arch_rwlock_t *lock)
10265+static inline int arch_read_trylock(arch_rwlock_t *lock)
10266 {
10267 int tmp1, tmp2;
10268
10269@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10270 "1: ldsw [%2], %0\n"
10271 " brlz,a,pn %0, 2f\n"
10272 " mov 0, %0\n"
10273-" add %0, 1, %1\n"
10274+" addcc %0, 1, %1\n"
10275+
10276+#ifdef CONFIG_PAX_REFCOUNT
10277+" tvs %%icc, 6\n"
10278+#endif
10279+
10280 " cas [%2], %0, %1\n"
10281 " cmp %0, %1\n"
10282 " bne,pn %%icc, 1b\n"
10283@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10284 return tmp1;
10285 }
10286
10287-static void inline arch_read_unlock(arch_rwlock_t *lock)
10288+static inline void arch_read_unlock(arch_rwlock_t *lock)
10289 {
10290 unsigned long tmp1, tmp2;
10291
10292 __asm__ __volatile__(
10293 "1: lduw [%2], %0\n"
10294-" sub %0, 1, %1\n"
10295+" subcc %0, 1, %1\n"
10296+
10297+#ifdef CONFIG_PAX_REFCOUNT
10298+" tvs %%icc, 6\n"
10299+#endif
10300+
10301 " cas [%2], %0, %1\n"
10302 " cmp %0, %1\n"
10303 " bne,pn %%xcc, 1b\n"
10304@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10305 : "memory");
10306 }
10307
10308-static void inline arch_write_lock(arch_rwlock_t *lock)
10309+static inline void arch_write_lock(arch_rwlock_t *lock)
10310 {
10311 unsigned long mask, tmp1, tmp2;
10312
10313@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10314 : "memory");
10315 }
10316
10317-static void inline arch_write_unlock(arch_rwlock_t *lock)
10318+static inline void arch_write_unlock(arch_rwlock_t *lock)
10319 {
10320 __asm__ __volatile__(
10321 " stw %%g0, [%0]"
10322@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10323 : "memory");
10324 }
10325
10326-static int inline arch_write_trylock(arch_rwlock_t *lock)
10327+static inline int arch_write_trylock(arch_rwlock_t *lock)
10328 {
10329 unsigned long mask, tmp1, tmp2, result;
10330
10331diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10332index fd7bd0a..2e2fa7a 100644
10333--- a/arch/sparc/include/asm/thread_info_32.h
10334+++ b/arch/sparc/include/asm/thread_info_32.h
10335@@ -47,6 +47,7 @@ struct thread_info {
10336 struct reg_window32 reg_window[NSWINS]; /* align for ldd! */
10337 unsigned long rwbuf_stkptrs[NSWINS];
10338 unsigned long w_saved;
10339+ unsigned long lowest_stack;
10340 };
10341
10342 /*
10343diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10344index ff45516..73001ab 100644
10345--- a/arch/sparc/include/asm/thread_info_64.h
10346+++ b/arch/sparc/include/asm/thread_info_64.h
10347@@ -61,6 +61,8 @@ struct thread_info {
10348 struct pt_regs *kern_una_regs;
10349 unsigned int kern_una_insn;
10350
10351+ unsigned long lowest_stack;
10352+
10353 unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
10354 __attribute__ ((aligned(64)));
10355 };
10356@@ -184,12 +186,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10357 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10358 /* flag bit 4 is available */
10359 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10360-/* flag bit 6 is available */
10361+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10362 #define TIF_32BIT 7 /* 32-bit binary */
10363 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10364 #define TIF_SECCOMP 9 /* secure computing */
10365 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10366 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10367+
10368 /* NOTE: Thread flags >= 12 should be ones we have no interest
10369 * in using in assembly, else we can't use the mask as
10370 * an immediate value in instructions such as andcc.
10371@@ -209,12 +212,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
10372 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10373 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10374 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10375+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10376
10377 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10378 _TIF_DO_NOTIFY_RESUME_MASK | \
10379 _TIF_NEED_RESCHED)
10380 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10381
10382+#define _TIF_WORK_SYSCALL \
10383+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10384+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10385+
10386 #define is_32bit_task() (test_thread_flag(TIF_32BIT))
10387
10388 /*
10389diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
10390index bd56c28..4b63d83 100644
10391--- a/arch/sparc/include/asm/uaccess.h
10392+++ b/arch/sparc/include/asm/uaccess.h
10393@@ -1,5 +1,6 @@
10394 #ifndef ___ASM_SPARC_UACCESS_H
10395 #define ___ASM_SPARC_UACCESS_H
10396+
10397 #if defined(__sparc__) && defined(__arch64__)
10398 #include <asm/uaccess_64.h>
10399 #else
10400diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
10401index 64ee103..388aef0 100644
10402--- a/arch/sparc/include/asm/uaccess_32.h
10403+++ b/arch/sparc/include/asm/uaccess_32.h
10404@@ -47,6 +47,7 @@
10405 #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
10406 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
10407 #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
10408+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
10409 #define access_ok(type, addr, size) \
10410 ({ (void)(type); __access_ok((unsigned long)(addr), size); })
10411
10412@@ -313,27 +314,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
10413
10414 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
10415 {
10416- if (n && __access_ok((unsigned long) to, n))
10417+ if ((long)n < 0)
10418+ return n;
10419+
10420+ if (n && __access_ok((unsigned long) to, n)) {
10421+ if (!__builtin_constant_p(n))
10422+ check_object_size(from, n, true);
10423 return __copy_user(to, (__force void __user *) from, n);
10424- else
10425+ } else
10426 return n;
10427 }
10428
10429 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
10430 {
10431+ if ((long)n < 0)
10432+ return n;
10433+
10434+ if (!__builtin_constant_p(n))
10435+ check_object_size(from, n, true);
10436+
10437 return __copy_user(to, (__force void __user *) from, n);
10438 }
10439
10440 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
10441 {
10442- if (n && __access_ok((unsigned long) from, n))
10443+ if ((long)n < 0)
10444+ return n;
10445+
10446+ if (n && __access_ok((unsigned long) from, n)) {
10447+ if (!__builtin_constant_p(n))
10448+ check_object_size(to, n, false);
10449 return __copy_user((__force void __user *) to, from, n);
10450- else
10451+ } else
10452 return n;
10453 }
10454
10455 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10456 {
10457+ if ((long)n < 0)
10458+ return n;
10459+
10460 return __copy_user((__force void __user *) to, from, n);
10461 }
10462
10463diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10464index a35194b..47dabc0d 100644
10465--- a/arch/sparc/include/asm/uaccess_64.h
10466+++ b/arch/sparc/include/asm/uaccess_64.h
10467@@ -10,6 +10,7 @@
10468 #include <linux/compiler.h>
10469 #include <linux/string.h>
10470 #include <linux/thread_info.h>
10471+#include <linux/kernel.h>
10472 #include <asm/asi.h>
10473 #include <asm/spitfire.h>
10474 #include <asm-generic/uaccess-unaligned.h>
10475@@ -54,6 +55,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size)
10476 return 1;
10477 }
10478
10479+static inline int access_ok_noprefault(int type, const void __user * addr, unsigned long size)
10480+{
10481+ return 1;
10482+}
10483+
10484 static inline int access_ok(int type, const void __user * addr, unsigned long size)
10485 {
10486 return 1;
10487@@ -228,8 +234,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
10488 static inline unsigned long __must_check
10489 copy_from_user(void *to, const void __user *from, unsigned long size)
10490 {
10491- unsigned long ret = ___copy_from_user(to, from, size);
10492+ unsigned long ret;
10493
10494+ if ((long)size < 0 || size > INT_MAX)
10495+ return size;
10496+
10497+ if (!__builtin_constant_p(size))
10498+ check_object_size(to, size, false);
10499+
10500+ ret = ___copy_from_user(to, from, size);
10501 if (unlikely(ret))
10502 ret = copy_from_user_fixup(to, from, size);
10503
10504@@ -245,8 +258,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
10505 static inline unsigned long __must_check
10506 copy_to_user(void __user *to, const void *from, unsigned long size)
10507 {
10508- unsigned long ret = ___copy_to_user(to, from, size);
10509+ unsigned long ret;
10510
10511+ if ((long)size < 0 || size > INT_MAX)
10512+ return size;
10513+
10514+ if (!__builtin_constant_p(size))
10515+ check_object_size(from, size, true);
10516+
10517+ ret = ___copy_to_user(to, from, size);
10518 if (unlikely(ret))
10519 ret = copy_to_user_fixup(to, from, size);
10520 return ret;
10521diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10522index 7cf9c6e..6206648 100644
10523--- a/arch/sparc/kernel/Makefile
10524+++ b/arch/sparc/kernel/Makefile
10525@@ -4,7 +4,7 @@
10526 #
10527
10528 asflags-y := -ansi
10529-ccflags-y := -Werror
10530+#ccflags-y := -Werror
10531
10532 extra-y := head_$(BITS).o
10533
10534diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10535index 50e7b62..79fae35 100644
10536--- a/arch/sparc/kernel/process_32.c
10537+++ b/arch/sparc/kernel/process_32.c
10538@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
10539
10540 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10541 r->psr, r->pc, r->npc, r->y, print_tainted());
10542- printk("PC: <%pS>\n", (void *) r->pc);
10543+ printk("PC: <%pA>\n", (void *) r->pc);
10544 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10545 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10546 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10547 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10548 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10549 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10550- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10551+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10552
10553 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10554 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10555@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10556 rw = (struct reg_window32 *) fp;
10557 pc = rw->ins[7];
10558 printk("[%08lx : ", pc);
10559- printk("%pS ] ", (void *) pc);
10560+ printk("%pA ] ", (void *) pc);
10561 fp = rw->ins[6];
10562 } while (++count < 16);
10563 printk("\n");
10564diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10565index 46a5964..a35c62c 100644
10566--- a/arch/sparc/kernel/process_64.c
10567+++ b/arch/sparc/kernel/process_64.c
10568@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
10569 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10570 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10571 if (regs->tstate & TSTATE_PRIV)
10572- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10573+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10574 }
10575
10576 void show_regs(struct pt_regs *regs)
10577@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
10578
10579 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10580 regs->tpc, regs->tnpc, regs->y, print_tainted());
10581- printk("TPC: <%pS>\n", (void *) regs->tpc);
10582+ printk("TPC: <%pA>\n", (void *) regs->tpc);
10583 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10584 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10585 regs->u_regs[3]);
10586@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
10587 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10588 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10589 regs->u_regs[15]);
10590- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10591+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10592 show_regwindow(regs);
10593 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10594 }
10595@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
10596 ((tp && tp->task) ? tp->task->pid : -1));
10597
10598 if (gp->tstate & TSTATE_PRIV) {
10599- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10600+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10601 (void *) gp->tpc,
10602 (void *) gp->o7,
10603 (void *) gp->i7,
10604diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10605index 79cc0d1..ec62734 100644
10606--- a/arch/sparc/kernel/prom_common.c
10607+++ b/arch/sparc/kernel/prom_common.c
10608@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10609
10610 unsigned int prom_early_allocated __initdata;
10611
10612-static struct of_pdt_ops prom_sparc_ops __initdata = {
10613+static struct of_pdt_ops prom_sparc_ops __initconst = {
10614 .nextprop = prom_common_nextprop,
10615 .getproplen = prom_getproplen,
10616 .getproperty = prom_getproperty,
10617diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10618index 9ddc492..27a5619 100644
10619--- a/arch/sparc/kernel/ptrace_64.c
10620+++ b/arch/sparc/kernel/ptrace_64.c
10621@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10622 return ret;
10623 }
10624
10625+#ifdef CONFIG_GRKERNSEC_SETXID
10626+extern void gr_delayed_cred_worker(void);
10627+#endif
10628+
10629 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10630 {
10631 int ret = 0;
10632@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10633 if (test_thread_flag(TIF_NOHZ))
10634 user_exit();
10635
10636+#ifdef CONFIG_GRKERNSEC_SETXID
10637+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10638+ gr_delayed_cred_worker();
10639+#endif
10640+
10641 if (test_thread_flag(TIF_SYSCALL_TRACE))
10642 ret = tracehook_report_syscall_entry(regs);
10643
10644@@ -1088,6 +1097,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10645 if (test_thread_flag(TIF_NOHZ))
10646 user_exit();
10647
10648+#ifdef CONFIG_GRKERNSEC_SETXID
10649+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10650+ gr_delayed_cred_worker();
10651+#endif
10652+
10653 audit_syscall_exit(regs);
10654
10655 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
10656diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10657index 61139d9..c1a5f28 100644
10658--- a/arch/sparc/kernel/smp_64.c
10659+++ b/arch/sparc/kernel/smp_64.c
10660@@ -887,7 +887,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10661 return;
10662
10663 #ifdef CONFIG_DEBUG_DCFLUSH
10664- atomic_inc(&dcpage_flushes);
10665+ atomic_inc_unchecked(&dcpage_flushes);
10666 #endif
10667
10668 this_cpu = get_cpu();
10669@@ -911,7 +911,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10670 xcall_deliver(data0, __pa(pg_addr),
10671 (u64) pg_addr, cpumask_of(cpu));
10672 #ifdef CONFIG_DEBUG_DCFLUSH
10673- atomic_inc(&dcpage_flushes_xcall);
10674+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10675 #endif
10676 }
10677 }
10678@@ -930,7 +930,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10679 preempt_disable();
10680
10681 #ifdef CONFIG_DEBUG_DCFLUSH
10682- atomic_inc(&dcpage_flushes);
10683+ atomic_inc_unchecked(&dcpage_flushes);
10684 #endif
10685 data0 = 0;
10686 pg_addr = page_address(page);
10687@@ -947,7 +947,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10688 xcall_deliver(data0, __pa(pg_addr),
10689 (u64) pg_addr, cpu_online_mask);
10690 #ifdef CONFIG_DEBUG_DCFLUSH
10691- atomic_inc(&dcpage_flushes_xcall);
10692+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10693 #endif
10694 }
10695 __local_flush_dcache_page(page);
10696diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10697index 646988d..b88905f 100644
10698--- a/arch/sparc/kernel/sys_sparc_32.c
10699+++ b/arch/sparc/kernel/sys_sparc_32.c
10700@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10701 if (len > TASK_SIZE - PAGE_SIZE)
10702 return -ENOMEM;
10703 if (!addr)
10704- addr = TASK_UNMAPPED_BASE;
10705+ addr = current->mm->mmap_base;
10706
10707 info.flags = 0;
10708 info.length = len;
10709diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10710index 30e7ddb..266a3b0 100644
10711--- a/arch/sparc/kernel/sys_sparc_64.c
10712+++ b/arch/sparc/kernel/sys_sparc_64.c
10713@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10714 struct vm_area_struct * vma;
10715 unsigned long task_size = TASK_SIZE;
10716 int do_color_align;
10717+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10718 struct vm_unmapped_area_info info;
10719
10720 if (flags & MAP_FIXED) {
10721 /* We do not accept a shared mapping if it would violate
10722 * cache aliasing constraints.
10723 */
10724- if ((flags & MAP_SHARED) &&
10725+ if ((filp || (flags & MAP_SHARED)) &&
10726 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10727 return -EINVAL;
10728 return addr;
10729@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10730 if (filp || (flags & MAP_SHARED))
10731 do_color_align = 1;
10732
10733+#ifdef CONFIG_PAX_RANDMMAP
10734+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10735+#endif
10736+
10737 if (addr) {
10738 if (do_color_align)
10739 addr = COLOR_ALIGN(addr, pgoff);
10740@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10741 addr = PAGE_ALIGN(addr);
10742
10743 vma = find_vma(mm, addr);
10744- if (task_size - len >= addr &&
10745- (!vma || addr + len <= vma->vm_start))
10746+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10747 return addr;
10748 }
10749
10750 info.flags = 0;
10751 info.length = len;
10752- info.low_limit = TASK_UNMAPPED_BASE;
10753+ info.low_limit = mm->mmap_base;
10754 info.high_limit = min(task_size, VA_EXCLUDE_START);
10755 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10756 info.align_offset = pgoff << PAGE_SHIFT;
10757+ info.threadstack_offset = offset;
10758 addr = vm_unmapped_area(&info);
10759
10760 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10761 VM_BUG_ON(addr != -ENOMEM);
10762 info.low_limit = VA_EXCLUDE_END;
10763+
10764+#ifdef CONFIG_PAX_RANDMMAP
10765+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10766+ info.low_limit += mm->delta_mmap;
10767+#endif
10768+
10769 info.high_limit = task_size;
10770 addr = vm_unmapped_area(&info);
10771 }
10772@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10773 unsigned long task_size = STACK_TOP32;
10774 unsigned long addr = addr0;
10775 int do_color_align;
10776+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10777 struct vm_unmapped_area_info info;
10778
10779 /* This should only ever run for 32-bit processes. */
10780@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10781 /* We do not accept a shared mapping if it would violate
10782 * cache aliasing constraints.
10783 */
10784- if ((flags & MAP_SHARED) &&
10785+ if ((filp || (flags & MAP_SHARED)) &&
10786 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10787 return -EINVAL;
10788 return addr;
10789@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10790 if (filp || (flags & MAP_SHARED))
10791 do_color_align = 1;
10792
10793+#ifdef CONFIG_PAX_RANDMMAP
10794+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10795+#endif
10796+
10797 /* requesting a specific address */
10798 if (addr) {
10799 if (do_color_align)
10800@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10801 addr = PAGE_ALIGN(addr);
10802
10803 vma = find_vma(mm, addr);
10804- if (task_size - len >= addr &&
10805- (!vma || addr + len <= vma->vm_start))
10806+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10807 return addr;
10808 }
10809
10810@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10811 info.high_limit = mm->mmap_base;
10812 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10813 info.align_offset = pgoff << PAGE_SHIFT;
10814+ info.threadstack_offset = offset;
10815 addr = vm_unmapped_area(&info);
10816
10817 /*
10818@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10819 VM_BUG_ON(addr != -ENOMEM);
10820 info.flags = 0;
10821 info.low_limit = TASK_UNMAPPED_BASE;
10822+
10823+#ifdef CONFIG_PAX_RANDMMAP
10824+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10825+ info.low_limit += mm->delta_mmap;
10826+#endif
10827+
10828 info.high_limit = STACK_TOP32;
10829 addr = vm_unmapped_area(&info);
10830 }
10831@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
10832 EXPORT_SYMBOL(get_fb_unmapped_area);
10833
10834 /* Essentially the same as PowerPC. */
10835-static unsigned long mmap_rnd(void)
10836+static unsigned long mmap_rnd(struct mm_struct *mm)
10837 {
10838 unsigned long rnd = 0UL;
10839
10840+#ifdef CONFIG_PAX_RANDMMAP
10841+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10842+#endif
10843+
10844 if (current->flags & PF_RANDOMIZE) {
10845 unsigned long val = get_random_int();
10846 if (test_thread_flag(TIF_32BIT))
10847@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
10848
10849 void arch_pick_mmap_layout(struct mm_struct *mm)
10850 {
10851- unsigned long random_factor = mmap_rnd();
10852+ unsigned long random_factor = mmap_rnd(mm);
10853 unsigned long gap;
10854
10855 /*
10856@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10857 gap == RLIM_INFINITY ||
10858 sysctl_legacy_va_layout) {
10859 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
10860+
10861+#ifdef CONFIG_PAX_RANDMMAP
10862+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10863+ mm->mmap_base += mm->delta_mmap;
10864+#endif
10865+
10866 mm->get_unmapped_area = arch_get_unmapped_area;
10867 } else {
10868 /* We know it's 32-bit */
10869@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10870 gap = (task_size / 6 * 5);
10871
10872 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
10873+
10874+#ifdef CONFIG_PAX_RANDMMAP
10875+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10876+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10877+#endif
10878+
10879 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10880 }
10881 }
10882diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
10883index bb00089..e0ea580 100644
10884--- a/arch/sparc/kernel/syscalls.S
10885+++ b/arch/sparc/kernel/syscalls.S
10886@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
10887 #endif
10888 .align 32
10889 1: ldx [%g6 + TI_FLAGS], %l5
10890- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10891+ andcc %l5, _TIF_WORK_SYSCALL, %g0
10892 be,pt %icc, rtrap
10893 nop
10894 call syscall_trace_leave
10895@@ -194,7 +194,7 @@ linux_sparc_syscall32:
10896
10897 srl %i3, 0, %o3 ! IEU0
10898 srl %i2, 0, %o2 ! IEU0 Group
10899- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10900+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10901 bne,pn %icc, linux_syscall_trace32 ! CTI
10902 mov %i0, %l5 ! IEU1
10903 5: call %l7 ! CTI Group brk forced
10904@@ -218,7 +218,7 @@ linux_sparc_syscall:
10905
10906 mov %i3, %o3 ! IEU1
10907 mov %i4, %o4 ! IEU0 Group
10908- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10909+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10910 bne,pn %icc, linux_syscall_trace ! CTI Group
10911 mov %i0, %l5 ! IEU0
10912 2: call %l7 ! CTI Group brk forced
10913@@ -233,7 +233,7 @@ ret_sys_call:
10914
10915 cmp %o0, -ERESTART_RESTARTBLOCK
10916 bgeu,pn %xcc, 1f
10917- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10918+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10919 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
10920
10921 2:
10922diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
10923index 6fd386c5..6907d81 100644
10924--- a/arch/sparc/kernel/traps_32.c
10925+++ b/arch/sparc/kernel/traps_32.c
10926@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
10927 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
10928 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
10929
10930+extern void gr_handle_kernel_exploit(void);
10931+
10932 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10933 {
10934 static int die_counter;
10935@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
10936 count++ < 30 &&
10937 (((unsigned long) rw) >= PAGE_OFFSET) &&
10938 !(((unsigned long) rw) & 0x7)) {
10939- printk("Caller[%08lx]: %pS\n", rw->ins[7],
10940+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
10941 (void *) rw->ins[7]);
10942 rw = (struct reg_window32 *)rw->ins[6];
10943 }
10944 }
10945 printk("Instruction DUMP:");
10946 instruction_dump ((unsigned long *) regs->pc);
10947- if(regs->psr & PSR_PS)
10948+ if(regs->psr & PSR_PS) {
10949+ gr_handle_kernel_exploit();
10950 do_exit(SIGKILL);
10951+ }
10952 do_exit(SIGSEGV);
10953 }
10954
10955diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
10956index 0e69974..0c15a6e 100644
10957--- a/arch/sparc/kernel/traps_64.c
10958+++ b/arch/sparc/kernel/traps_64.c
10959@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
10960 i + 1,
10961 p->trapstack[i].tstate, p->trapstack[i].tpc,
10962 p->trapstack[i].tnpc, p->trapstack[i].tt);
10963- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
10964+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
10965 }
10966 }
10967
10968@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
10969
10970 lvl -= 0x100;
10971 if (regs->tstate & TSTATE_PRIV) {
10972+
10973+#ifdef CONFIG_PAX_REFCOUNT
10974+ if (lvl == 6)
10975+ pax_report_refcount_overflow(regs);
10976+#endif
10977+
10978 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
10979 die_if_kernel(buffer, regs);
10980 }
10981@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
10982 void bad_trap_tl1(struct pt_regs *regs, long lvl)
10983 {
10984 char buffer[32];
10985-
10986+
10987 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
10988 0, lvl, SIGTRAP) == NOTIFY_STOP)
10989 return;
10990
10991+#ifdef CONFIG_PAX_REFCOUNT
10992+ if (lvl == 6)
10993+ pax_report_refcount_overflow(regs);
10994+#endif
10995+
10996 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
10997
10998 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
10999@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
11000 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
11001 printk("%s" "ERROR(%d): ",
11002 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
11003- printk("TPC<%pS>\n", (void *) regs->tpc);
11004+ printk("TPC<%pA>\n", (void *) regs->tpc);
11005 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
11006 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
11007 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
11008@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11009 smp_processor_id(),
11010 (type & 0x1) ? 'I' : 'D',
11011 regs->tpc);
11012- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11013+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11014 panic("Irrecoverable Cheetah+ parity error.");
11015 }
11016
11017@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11018 smp_processor_id(),
11019 (type & 0x1) ? 'I' : 'D',
11020 regs->tpc);
11021- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11022+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11023 }
11024
11025 struct sun4v_error_entry {
11026@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11027 /*0x38*/u64 reserved_5;
11028 };
11029
11030-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11031-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11032+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11033+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11034
11035 static const char *sun4v_err_type_to_str(u8 type)
11036 {
11037@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11038 }
11039
11040 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11041- int cpu, const char *pfx, atomic_t *ocnt)
11042+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11043 {
11044 u64 *raw_ptr = (u64 *) ent;
11045 u32 attrs;
11046@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11047
11048 show_regs(regs);
11049
11050- if ((cnt = atomic_read(ocnt)) != 0) {
11051- atomic_set(ocnt, 0);
11052+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11053+ atomic_set_unchecked(ocnt, 0);
11054 wmb();
11055 printk("%s: Queue overflowed %d times.\n",
11056 pfx, cnt);
11057@@ -2048,7 +2059,7 @@ out:
11058 */
11059 void sun4v_resum_overflow(struct pt_regs *regs)
11060 {
11061- atomic_inc(&sun4v_resum_oflow_cnt);
11062+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11063 }
11064
11065 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11066@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11067 /* XXX Actually even this can make not that much sense. Perhaps
11068 * XXX we should just pull the plug and panic directly from here?
11069 */
11070- atomic_inc(&sun4v_nonresum_oflow_cnt);
11071+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11072 }
11073
11074 static void sun4v_tlb_error(struct pt_regs *regs)
11075@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11076
11077 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11078 regs->tpc, tl);
11079- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11080+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11081 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11082- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11083+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11084 (void *) regs->u_regs[UREG_I7]);
11085 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11086 "pte[%lx] error[%lx]\n",
11087@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11088
11089 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11090 regs->tpc, tl);
11091- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11092+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11093 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11094- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11095+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11096 (void *) regs->u_regs[UREG_I7]);
11097 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11098 "pte[%lx] error[%lx]\n",
11099@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11100 fp = (unsigned long)sf->fp + STACK_BIAS;
11101 }
11102
11103- printk(" [%016lx] %pS\n", pc, (void *) pc);
11104+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11105 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11106 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11107 int index = tsk->curr_ret_stack;
11108 if (tsk->ret_stack && index >= graph) {
11109 pc = tsk->ret_stack[index - graph].ret;
11110- printk(" [%016lx] %pS\n", pc, (void *) pc);
11111+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11112 graph++;
11113 }
11114 }
11115@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11116 return (struct reg_window *) (fp + STACK_BIAS);
11117 }
11118
11119+extern void gr_handle_kernel_exploit(void);
11120+
11121 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11122 {
11123 static int die_counter;
11124@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11125 while (rw &&
11126 count++ < 30 &&
11127 kstack_valid(tp, (unsigned long) rw)) {
11128- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11129+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11130 (void *) rw->ins[7]);
11131
11132 rw = kernel_stack_up(rw);
11133@@ -2429,8 +2442,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11134 }
11135 if (panic_on_oops)
11136 panic("Fatal exception");
11137- if (regs->tstate & TSTATE_PRIV)
11138+ if (regs->tstate & TSTATE_PRIV) {
11139+ gr_handle_kernel_exploit();
11140 do_exit(SIGKILL);
11141+ }
11142 do_exit(SIGSEGV);
11143 }
11144 EXPORT_SYMBOL(die_if_kernel);
11145diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11146index 62098a8..547ab2c 100644
11147--- a/arch/sparc/kernel/unaligned_64.c
11148+++ b/arch/sparc/kernel/unaligned_64.c
11149@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11150 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11151
11152 if (__ratelimit(&ratelimit)) {
11153- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11154+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11155 regs->tpc, (void *) regs->tpc);
11156 }
11157 }
11158diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11159index 3269b02..64f5231 100644
11160--- a/arch/sparc/lib/Makefile
11161+++ b/arch/sparc/lib/Makefile
11162@@ -2,7 +2,7 @@
11163 #
11164
11165 asflags-y := -ansi -DST_DIV0=0x02
11166-ccflags-y := -Werror
11167+#ccflags-y := -Werror
11168
11169 lib-$(CONFIG_SPARC32) += ashrdi3.o
11170 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11171diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11172index 05dac43..76f8ed4 100644
11173--- a/arch/sparc/lib/atomic_64.S
11174+++ b/arch/sparc/lib/atomic_64.S
11175@@ -15,11 +15,22 @@
11176 * a value and does the barriers.
11177 */
11178
11179-#define ATOMIC_OP(op) \
11180-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11181+#ifdef CONFIG_PAX_REFCOUNT
11182+#define __REFCOUNT_OP(op) op##cc
11183+#define __OVERFLOW_IOP tvs %icc, 6;
11184+#define __OVERFLOW_XOP tvs %xcc, 6;
11185+#else
11186+#define __REFCOUNT_OP(op) op
11187+#define __OVERFLOW_IOP
11188+#define __OVERFLOW_XOP
11189+#endif
11190+
11191+#define __ATOMIC_OP(op, suffix, asm_op, post_op) \
11192+ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11193 BACKOFF_SETUP(%o2); \
11194 1: lduw [%o1], %g1; \
11195- op %g1, %o0, %g7; \
11196+ asm_op %g1, %o0, %g7; \
11197+ post_op \
11198 cas [%o1], %g1, %g7; \
11199 cmp %g1, %g7; \
11200 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11201@@ -29,11 +40,15 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11202 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11203 ENDPROC(atomic_##op); \
11204
11205-#define ATOMIC_OP_RETURN(op) \
11206-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11207+#define ATOMIC_OP(op) __ATOMIC_OP(op, , op, ) \
11208+ __ATOMIC_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11209+
11210+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op) \
11211+ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11212 BACKOFF_SETUP(%o2); \
11213 1: lduw [%o1], %g1; \
11214- op %g1, %o0, %g7; \
11215+ asm_op %g1, %o0, %g7; \
11216+ post_op \
11217 cas [%o1], %g1, %g7; \
11218 cmp %g1, %g7; \
11219 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11220@@ -43,6 +58,9 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11221 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11222 ENDPROC(atomic_##op##_return);
11223
11224+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , op, ) \
11225+ __ATOMIC_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11226+
11227 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11228
11229 ATOMIC_OPS(add)
11230@@ -50,13 +68,16 @@ ATOMIC_OPS(sub)
11231
11232 #undef ATOMIC_OPS
11233 #undef ATOMIC_OP_RETURN
11234+#undef __ATOMIC_OP_RETURN
11235 #undef ATOMIC_OP
11236+#undef __ATOMIC_OP
11237
11238-#define ATOMIC64_OP(op) \
11239-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11240+#define __ATOMIC64_OP(op, suffix, asm_op, post_op) \
11241+ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11242 BACKOFF_SETUP(%o2); \
11243 1: ldx [%o1], %g1; \
11244- op %g1, %o0, %g7; \
11245+ asm_op %g1, %o0, %g7; \
11246+ post_op \
11247 casx [%o1], %g1, %g7; \
11248 cmp %g1, %g7; \
11249 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11250@@ -66,11 +87,15 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11251 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11252 ENDPROC(atomic64_##op); \
11253
11254-#define ATOMIC64_OP_RETURN(op) \
11255-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11256+#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , op, ) \
11257+ __ATOMIC64_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11258+
11259+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op) \
11260+ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11261 BACKOFF_SETUP(%o2); \
11262 1: ldx [%o1], %g1; \
11263- op %g1, %o0, %g7; \
11264+ asm_op %g1, %o0, %g7; \
11265+ post_op \
11266 casx [%o1], %g1, %g7; \
11267 cmp %g1, %g7; \
11268 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11269@@ -80,6 +105,9 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11270 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11271 ENDPROC(atomic64_##op##_return);
11272
11273+#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , op, ) \
11274+	__ATOMIC64_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11275+
11276 #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
11277
11278 ATOMIC64_OPS(add)
11279@@ -87,7 +115,12 @@ ATOMIC64_OPS(sub)
11280
11281 #undef ATOMIC64_OPS
11282 #undef ATOMIC64_OP_RETURN
11283+#undef __ATOMIC64_OP_RETURN
11284 #undef ATOMIC64_OP
11285+#undef __ATOMIC64_OP
11286+#undef __OVERFLOW_XOP
11287+#undef __OVERFLOW_IOP
11288+#undef __REFCOUNT_OP
11289
11290 ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
11291 BACKOFF_SETUP(%o2)
11292diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11293index 1d649a9..fbc5bfc 100644
11294--- a/arch/sparc/lib/ksyms.c
11295+++ b/arch/sparc/lib/ksyms.c
11296@@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
11297 /* Atomic counter implementation. */
11298 #define ATOMIC_OP(op) \
11299 EXPORT_SYMBOL(atomic_##op); \
11300-EXPORT_SYMBOL(atomic64_##op);
11301+EXPORT_SYMBOL(atomic_##op##_unchecked); \
11302+EXPORT_SYMBOL(atomic64_##op); \
11303+EXPORT_SYMBOL(atomic64_##op##_unchecked);
11304
11305 #define ATOMIC_OP_RETURN(op) \
11306 EXPORT_SYMBOL(atomic_##op##_return); \
11307@@ -110,6 +112,8 @@ EXPORT_SYMBOL(atomic64_##op##_return);
11308 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11309
11310 ATOMIC_OPS(add)
11311+EXPORT_SYMBOL(atomic_add_return_unchecked);
11312+EXPORT_SYMBOL(atomic64_add_return_unchecked);
11313 ATOMIC_OPS(sub)
11314
11315 #undef ATOMIC_OPS
11316diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
11317index 30c3ecc..736f015 100644
11318--- a/arch/sparc/mm/Makefile
11319+++ b/arch/sparc/mm/Makefile
11320@@ -2,7 +2,7 @@
11321 #
11322
11323 asflags-y := -ansi
11324-ccflags-y := -Werror
11325+#ccflags-y := -Werror
11326
11327 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
11328 obj-y += fault_$(BITS).o
11329diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
11330index 70d8171..274c6c0 100644
11331--- a/arch/sparc/mm/fault_32.c
11332+++ b/arch/sparc/mm/fault_32.c
11333@@ -21,6 +21,9 @@
11334 #include <linux/perf_event.h>
11335 #include <linux/interrupt.h>
11336 #include <linux/kdebug.h>
11337+#include <linux/slab.h>
11338+#include <linux/pagemap.h>
11339+#include <linux/compiler.h>
11340
11341 #include <asm/page.h>
11342 #include <asm/pgtable.h>
11343@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11344 return safe_compute_effective_address(regs, insn);
11345 }
11346
11347+#ifdef CONFIG_PAX_PAGEEXEC
11348+#ifdef CONFIG_PAX_DLRESOLVE
11349+static void pax_emuplt_close(struct vm_area_struct *vma)
11350+{
11351+ vma->vm_mm->call_dl_resolve = 0UL;
11352+}
11353+
11354+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11355+{
11356+ unsigned int *kaddr;
11357+
11358+ vmf->page = alloc_page(GFP_HIGHUSER);
11359+ if (!vmf->page)
11360+ return VM_FAULT_OOM;
11361+
11362+ kaddr = kmap(vmf->page);
11363+ memset(kaddr, 0, PAGE_SIZE);
11364+ kaddr[0] = 0x9DE3BFA8U; /* save */
11365+ flush_dcache_page(vmf->page);
11366+ kunmap(vmf->page);
11367+ return VM_FAULT_MAJOR;
11368+}
11369+
11370+static const struct vm_operations_struct pax_vm_ops = {
11371+ .close = pax_emuplt_close,
11372+ .fault = pax_emuplt_fault
11373+};
11374+
11375+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11376+{
11377+ int ret;
11378+
11379+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11380+ vma->vm_mm = current->mm;
11381+ vma->vm_start = addr;
11382+ vma->vm_end = addr + PAGE_SIZE;
11383+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11384+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11385+ vma->vm_ops = &pax_vm_ops;
11386+
11387+ ret = insert_vm_struct(current->mm, vma);
11388+ if (ret)
11389+ return ret;
11390+
11391+ ++current->mm->total_vm;
11392+ return 0;
11393+}
11394+#endif
11395+
11396+/*
11397+ * PaX: decide what to do with offenders (regs->pc = fault address)
11398+ *
11399+ * returns 1 when task should be killed
11400+ * 2 when patched PLT trampoline was detected
11401+ * 3 when unpatched PLT trampoline was detected
11402+ */
11403+static int pax_handle_fetch_fault(struct pt_regs *regs)
11404+{
11405+
11406+#ifdef CONFIG_PAX_EMUPLT
11407+ int err;
11408+
11409+ do { /* PaX: patched PLT emulation #1 */
11410+ unsigned int sethi1, sethi2, jmpl;
11411+
11412+ err = get_user(sethi1, (unsigned int *)regs->pc);
11413+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11414+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11415+
11416+ if (err)
11417+ break;
11418+
11419+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11420+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11421+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11422+ {
11423+ unsigned int addr;
11424+
11425+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11426+ addr = regs->u_regs[UREG_G1];
11427+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11428+ regs->pc = addr;
11429+ regs->npc = addr+4;
11430+ return 2;
11431+ }
11432+ } while (0);
11433+
11434+ do { /* PaX: patched PLT emulation #2 */
11435+ unsigned int ba;
11436+
11437+ err = get_user(ba, (unsigned int *)regs->pc);
11438+
11439+ if (err)
11440+ break;
11441+
11442+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11443+ unsigned int addr;
11444+
11445+ if ((ba & 0xFFC00000U) == 0x30800000U)
11446+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11447+ else
11448+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11449+ regs->pc = addr;
11450+ regs->npc = addr+4;
11451+ return 2;
11452+ }
11453+ } while (0);
11454+
11455+ do { /* PaX: patched PLT emulation #3 */
11456+ unsigned int sethi, bajmpl, nop;
11457+
11458+ err = get_user(sethi, (unsigned int *)regs->pc);
11459+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11460+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11461+
11462+ if (err)
11463+ break;
11464+
11465+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11466+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11467+ nop == 0x01000000U)
11468+ {
11469+ unsigned int addr;
11470+
11471+ addr = (sethi & 0x003FFFFFU) << 10;
11472+ regs->u_regs[UREG_G1] = addr;
11473+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11474+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11475+ else
11476+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11477+ regs->pc = addr;
11478+ regs->npc = addr+4;
11479+ return 2;
11480+ }
11481+ } while (0);
11482+
11483+ do { /* PaX: unpatched PLT emulation step 1 */
11484+ unsigned int sethi, ba, nop;
11485+
11486+ err = get_user(sethi, (unsigned int *)regs->pc);
11487+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
11488+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11489+
11490+ if (err)
11491+ break;
11492+
11493+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11494+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11495+ nop == 0x01000000U)
11496+ {
11497+ unsigned int addr, save, call;
11498+
11499+ if ((ba & 0xFFC00000U) == 0x30800000U)
11500+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11501+ else
11502+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11503+
11504+ err = get_user(save, (unsigned int *)addr);
11505+ err |= get_user(call, (unsigned int *)(addr+4));
11506+ err |= get_user(nop, (unsigned int *)(addr+8));
11507+ if (err)
11508+ break;
11509+
11510+#ifdef CONFIG_PAX_DLRESOLVE
11511+ if (save == 0x9DE3BFA8U &&
11512+ (call & 0xC0000000U) == 0x40000000U &&
11513+ nop == 0x01000000U)
11514+ {
11515+ struct vm_area_struct *vma;
11516+ unsigned long call_dl_resolve;
11517+
11518+ down_read(&current->mm->mmap_sem);
11519+ call_dl_resolve = current->mm->call_dl_resolve;
11520+ up_read(&current->mm->mmap_sem);
11521+ if (likely(call_dl_resolve))
11522+ goto emulate;
11523+
11524+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11525+
11526+ down_write(&current->mm->mmap_sem);
11527+ if (current->mm->call_dl_resolve) {
11528+ call_dl_resolve = current->mm->call_dl_resolve;
11529+ up_write(&current->mm->mmap_sem);
11530+ if (vma)
11531+ kmem_cache_free(vm_area_cachep, vma);
11532+ goto emulate;
11533+ }
11534+
11535+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11536+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11537+ up_write(&current->mm->mmap_sem);
11538+ if (vma)
11539+ kmem_cache_free(vm_area_cachep, vma);
11540+ return 1;
11541+ }
11542+
11543+ if (pax_insert_vma(vma, call_dl_resolve)) {
11544+ up_write(&current->mm->mmap_sem);
11545+ kmem_cache_free(vm_area_cachep, vma);
11546+ return 1;
11547+ }
11548+
11549+ current->mm->call_dl_resolve = call_dl_resolve;
11550+ up_write(&current->mm->mmap_sem);
11551+
11552+emulate:
11553+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11554+ regs->pc = call_dl_resolve;
11555+ regs->npc = addr+4;
11556+ return 3;
11557+ }
11558+#endif
11559+
11560+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11561+ if ((save & 0xFFC00000U) == 0x05000000U &&
11562+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11563+ nop == 0x01000000U)
11564+ {
11565+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11566+ regs->u_regs[UREG_G2] = addr + 4;
11567+ addr = (save & 0x003FFFFFU) << 10;
11568+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11569+ regs->pc = addr;
11570+ regs->npc = addr+4;
11571+ return 3;
11572+ }
11573+ }
11574+ } while (0);
11575+
11576+ do { /* PaX: unpatched PLT emulation step 2 */
11577+ unsigned int save, call, nop;
11578+
11579+ err = get_user(save, (unsigned int *)(regs->pc-4));
11580+ err |= get_user(call, (unsigned int *)regs->pc);
11581+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
11582+ if (err)
11583+ break;
11584+
11585+ if (save == 0x9DE3BFA8U &&
11586+ (call & 0xC0000000U) == 0x40000000U &&
11587+ nop == 0x01000000U)
11588+ {
11589+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11590+
11591+ regs->u_regs[UREG_RETPC] = regs->pc;
11592+ regs->pc = dl_resolve;
11593+ regs->npc = dl_resolve+4;
11594+ return 3;
11595+ }
11596+ } while (0);
11597+#endif
11598+
11599+ return 1;
11600+}
11601+
11602+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11603+{
11604+ unsigned long i;
11605+
11606+ printk(KERN_ERR "PAX: bytes at PC: ");
11607+ for (i = 0; i < 8; i++) {
11608+ unsigned int c;
11609+ if (get_user(c, (unsigned int *)pc+i))
11610+ printk(KERN_CONT "???????? ");
11611+ else
11612+ printk(KERN_CONT "%08x ", c);
11613+ }
11614+ printk("\n");
11615+}
11616+#endif
11617+
11618 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11619 int text_fault)
11620 {
11621@@ -226,6 +500,24 @@ good_area:
11622 if (!(vma->vm_flags & VM_WRITE))
11623 goto bad_area;
11624 } else {
11625+
11626+#ifdef CONFIG_PAX_PAGEEXEC
11627+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11628+ up_read(&mm->mmap_sem);
11629+ switch (pax_handle_fetch_fault(regs)) {
11630+
11631+#ifdef CONFIG_PAX_EMUPLT
11632+ case 2:
11633+ case 3:
11634+ return;
11635+#endif
11636+
11637+ }
11638+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11639+ do_group_exit(SIGKILL);
11640+ }
11641+#endif
11642+
11643 /* Allow reads even for write-only mappings */
11644 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11645 goto bad_area;
11646diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11647index 4798232..f76e3aa 100644
11648--- a/arch/sparc/mm/fault_64.c
11649+++ b/arch/sparc/mm/fault_64.c
11650@@ -22,6 +22,9 @@
11651 #include <linux/kdebug.h>
11652 #include <linux/percpu.h>
11653 #include <linux/context_tracking.h>
11654+#include <linux/slab.h>
11655+#include <linux/pagemap.h>
11656+#include <linux/compiler.h>
11657
11658 #include <asm/page.h>
11659 #include <asm/pgtable.h>
11660@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11661 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11662 regs->tpc);
11663 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11664- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11665+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11666 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11667 dump_stack();
11668 unhandled_fault(regs->tpc, current, regs);
11669@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
11670 show_regs(regs);
11671 }
11672
11673+#ifdef CONFIG_PAX_PAGEEXEC
11674+#ifdef CONFIG_PAX_DLRESOLVE
11675+static void pax_emuplt_close(struct vm_area_struct *vma)
11676+{
11677+ vma->vm_mm->call_dl_resolve = 0UL;
11678+}
11679+
11680+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11681+{
11682+ unsigned int *kaddr;
11683+
11684+ vmf->page = alloc_page(GFP_HIGHUSER);
11685+ if (!vmf->page)
11686+ return VM_FAULT_OOM;
11687+
11688+ kaddr = kmap(vmf->page);
11689+ memset(kaddr, 0, PAGE_SIZE);
11690+ kaddr[0] = 0x9DE3BFA8U; /* save */
11691+ flush_dcache_page(vmf->page);
11692+ kunmap(vmf->page);
11693+ return VM_FAULT_MAJOR;
11694+}
11695+
11696+static const struct vm_operations_struct pax_vm_ops = {
11697+ .close = pax_emuplt_close,
11698+ .fault = pax_emuplt_fault
11699+};
11700+
11701+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11702+{
11703+ int ret;
11704+
11705+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11706+ vma->vm_mm = current->mm;
11707+ vma->vm_start = addr;
11708+ vma->vm_end = addr + PAGE_SIZE;
11709+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11710+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11711+ vma->vm_ops = &pax_vm_ops;
11712+
11713+ ret = insert_vm_struct(current->mm, vma);
11714+ if (ret)
11715+ return ret;
11716+
11717+ ++current->mm->total_vm;
11718+ return 0;
11719+}
11720+#endif
11721+
11722+/*
11723+ * PaX: decide what to do with offenders (regs->tpc = fault address)
11724+ *
11725+ * returns 1 when task should be killed
11726+ * 2 when patched PLT trampoline was detected
11727+ * 3 when unpatched PLT trampoline was detected
11728+ */
11729+static int pax_handle_fetch_fault(struct pt_regs *regs)
11730+{
11731+
11732+#ifdef CONFIG_PAX_EMUPLT
11733+ int err;
11734+
11735+ do { /* PaX: patched PLT emulation #1 */
11736+ unsigned int sethi1, sethi2, jmpl;
11737+
11738+ err = get_user(sethi1, (unsigned int *)regs->tpc);
11739+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
11740+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
11741+
11742+ if (err)
11743+ break;
11744+
11745+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11746+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11747+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11748+ {
11749+ unsigned long addr;
11750+
11751+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11752+ addr = regs->u_regs[UREG_G1];
11753+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11754+
11755+ if (test_thread_flag(TIF_32BIT))
11756+ addr &= 0xFFFFFFFFUL;
11757+
11758+ regs->tpc = addr;
11759+ regs->tnpc = addr+4;
11760+ return 2;
11761+ }
11762+ } while (0);
11763+
11764+ do { /* PaX: patched PLT emulation #2 */
11765+ unsigned int ba;
11766+
11767+ err = get_user(ba, (unsigned int *)regs->tpc);
11768+
11769+ if (err)
11770+ break;
11771+
11772+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11773+ unsigned long addr;
11774+
11775+ if ((ba & 0xFFC00000U) == 0x30800000U)
11776+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11777+ else
11778+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11779+
11780+ if (test_thread_flag(TIF_32BIT))
11781+ addr &= 0xFFFFFFFFUL;
11782+
11783+ regs->tpc = addr;
11784+ regs->tnpc = addr+4;
11785+ return 2;
11786+ }
11787+ } while (0);
11788+
11789+ do { /* PaX: patched PLT emulation #3 */
11790+ unsigned int sethi, bajmpl, nop;
11791+
11792+ err = get_user(sethi, (unsigned int *)regs->tpc);
11793+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
11794+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11795+
11796+ if (err)
11797+ break;
11798+
11799+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11800+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11801+ nop == 0x01000000U)
11802+ {
11803+ unsigned long addr;
11804+
11805+ addr = (sethi & 0x003FFFFFU) << 10;
11806+ regs->u_regs[UREG_G1] = addr;
11807+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11808+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11809+ else
11810+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11811+
11812+ if (test_thread_flag(TIF_32BIT))
11813+ addr &= 0xFFFFFFFFUL;
11814+
11815+ regs->tpc = addr;
11816+ regs->tnpc = addr+4;
11817+ return 2;
11818+ }
11819+ } while (0);
11820+
11821+ do { /* PaX: patched PLT emulation #4 */
11822+ unsigned int sethi, mov1, call, mov2;
11823+
11824+ err = get_user(sethi, (unsigned int *)regs->tpc);
11825+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
11826+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
11827+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
11828+
11829+ if (err)
11830+ break;
11831+
11832+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11833+ mov1 == 0x8210000FU &&
11834+ (call & 0xC0000000U) == 0x40000000U &&
11835+ mov2 == 0x9E100001U)
11836+ {
11837+ unsigned long addr;
11838+
11839+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
11840+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11841+
11842+ if (test_thread_flag(TIF_32BIT))
11843+ addr &= 0xFFFFFFFFUL;
11844+
11845+ regs->tpc = addr;
11846+ regs->tnpc = addr+4;
11847+ return 2;
11848+ }
11849+ } while (0);
11850+
11851+ do { /* PaX: patched PLT emulation #5 */
11852+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
11853+
11854+ err = get_user(sethi, (unsigned int *)regs->tpc);
11855+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11856+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11857+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
11858+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
11859+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
11860+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
11861+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
11862+
11863+ if (err)
11864+ break;
11865+
11866+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11867+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11868+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11869+ (or1 & 0xFFFFE000U) == 0x82106000U &&
11870+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11871+ sllx == 0x83287020U &&
11872+ jmpl == 0x81C04005U &&
11873+ nop == 0x01000000U)
11874+ {
11875+ unsigned long addr;
11876+
11877+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11878+ regs->u_regs[UREG_G1] <<= 32;
11879+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11880+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11881+ regs->tpc = addr;
11882+ regs->tnpc = addr+4;
11883+ return 2;
11884+ }
11885+ } while (0);
11886+
11887+ do { /* PaX: patched PLT emulation #6 */
11888+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
11889+
11890+ err = get_user(sethi, (unsigned int *)regs->tpc);
11891+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11892+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11893+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
11894+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
11895+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
11896+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
11897+
11898+ if (err)
11899+ break;
11900+
11901+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11902+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11903+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11904+ sllx == 0x83287020U &&
11905+ (or & 0xFFFFE000U) == 0x8A116000U &&
11906+ jmpl == 0x81C04005U &&
11907+ nop == 0x01000000U)
11908+ {
11909+ unsigned long addr;
11910+
11911+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11912+ regs->u_regs[UREG_G1] <<= 32;
11913+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11914+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11915+ regs->tpc = addr;
11916+ regs->tnpc = addr+4;
11917+ return 2;
11918+ }
11919+ } while (0);
11920+
11921+ do { /* PaX: unpatched PLT emulation step 1 */
11922+ unsigned int sethi, ba, nop;
11923+
11924+ err = get_user(sethi, (unsigned int *)regs->tpc);
11925+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11926+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11927+
11928+ if (err)
11929+ break;
11930+
11931+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11932+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11933+ nop == 0x01000000U)
11934+ {
11935+ unsigned long addr;
11936+ unsigned int save, call;
11937+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
11938+
11939+ if ((ba & 0xFFC00000U) == 0x30800000U)
11940+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11941+ else
11942+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11943+
11944+ if (test_thread_flag(TIF_32BIT))
11945+ addr &= 0xFFFFFFFFUL;
11946+
11947+ err = get_user(save, (unsigned int *)addr);
11948+ err |= get_user(call, (unsigned int *)(addr+4));
11949+ err |= get_user(nop, (unsigned int *)(addr+8));
11950+ if (err)
11951+ break;
11952+
11953+#ifdef CONFIG_PAX_DLRESOLVE
11954+ if (save == 0x9DE3BFA8U &&
11955+ (call & 0xC0000000U) == 0x40000000U &&
11956+ nop == 0x01000000U)
11957+ {
11958+ struct vm_area_struct *vma;
11959+ unsigned long call_dl_resolve;
11960+
11961+ down_read(&current->mm->mmap_sem);
11962+ call_dl_resolve = current->mm->call_dl_resolve;
11963+ up_read(&current->mm->mmap_sem);
11964+ if (likely(call_dl_resolve))
11965+ goto emulate;
11966+
11967+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11968+
11969+ down_write(&current->mm->mmap_sem);
11970+ if (current->mm->call_dl_resolve) {
11971+ call_dl_resolve = current->mm->call_dl_resolve;
11972+ up_write(&current->mm->mmap_sem);
11973+ if (vma)
11974+ kmem_cache_free(vm_area_cachep, vma);
11975+ goto emulate;
11976+ }
11977+
11978+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11979+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11980+ up_write(&current->mm->mmap_sem);
11981+ if (vma)
11982+ kmem_cache_free(vm_area_cachep, vma);
11983+ return 1;
11984+ }
11985+
11986+ if (pax_insert_vma(vma, call_dl_resolve)) {
11987+ up_write(&current->mm->mmap_sem);
11988+ kmem_cache_free(vm_area_cachep, vma);
11989+ return 1;
11990+ }
11991+
11992+ current->mm->call_dl_resolve = call_dl_resolve;
11993+ up_write(&current->mm->mmap_sem);
11994+
11995+emulate:
11996+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11997+ regs->tpc = call_dl_resolve;
11998+ regs->tnpc = addr+4;
11999+ return 3;
12000+ }
12001+#endif
12002+
12003+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
12004+ if ((save & 0xFFC00000U) == 0x05000000U &&
12005+ (call & 0xFFFFE000U) == 0x85C0A000U &&
12006+ nop == 0x01000000U)
12007+ {
12008+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12009+ regs->u_regs[UREG_G2] = addr + 4;
12010+ addr = (save & 0x003FFFFFU) << 10;
12011+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12012+
12013+ if (test_thread_flag(TIF_32BIT))
12014+ addr &= 0xFFFFFFFFUL;
12015+
12016+ regs->tpc = addr;
12017+ regs->tnpc = addr+4;
12018+ return 3;
12019+ }
12020+
12021+ /* PaX: 64-bit PLT stub */
12022+ err = get_user(sethi1, (unsigned int *)addr);
12023+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12024+ err |= get_user(or1, (unsigned int *)(addr+8));
12025+ err |= get_user(or2, (unsigned int *)(addr+12));
12026+ err |= get_user(sllx, (unsigned int *)(addr+16));
12027+ err |= get_user(add, (unsigned int *)(addr+20));
12028+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12029+ err |= get_user(nop, (unsigned int *)(addr+28));
12030+ if (err)
12031+ break;
12032+
12033+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12034+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12035+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12036+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12037+ sllx == 0x89293020U &&
12038+ add == 0x8A010005U &&
12039+ jmpl == 0x89C14000U &&
12040+ nop == 0x01000000U)
12041+ {
12042+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12043+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12044+ regs->u_regs[UREG_G4] <<= 32;
12045+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12046+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12047+ regs->u_regs[UREG_G4] = addr + 24;
12048+ addr = regs->u_regs[UREG_G5];
12049+ regs->tpc = addr;
12050+ regs->tnpc = addr+4;
12051+ return 3;
12052+ }
12053+ }
12054+ } while (0);
12055+
12056+#ifdef CONFIG_PAX_DLRESOLVE
12057+ do { /* PaX: unpatched PLT emulation step 2 */
12058+ unsigned int save, call, nop;
12059+
12060+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12061+ err |= get_user(call, (unsigned int *)regs->tpc);
12062+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12063+ if (err)
12064+ break;
12065+
12066+ if (save == 0x9DE3BFA8U &&
12067+ (call & 0xC0000000U) == 0x40000000U &&
12068+ nop == 0x01000000U)
12069+ {
12070+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12071+
12072+ if (test_thread_flag(TIF_32BIT))
12073+ dl_resolve &= 0xFFFFFFFFUL;
12074+
12075+ regs->u_regs[UREG_RETPC] = regs->tpc;
12076+ regs->tpc = dl_resolve;
12077+ regs->tnpc = dl_resolve+4;
12078+ return 3;
12079+ }
12080+ } while (0);
12081+#endif
12082+
12083+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12084+ unsigned int sethi, ba, nop;
12085+
12086+ err = get_user(sethi, (unsigned int *)regs->tpc);
12087+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12088+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12089+
12090+ if (err)
12091+ break;
12092+
12093+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12094+ (ba & 0xFFF00000U) == 0x30600000U &&
12095+ nop == 0x01000000U)
12096+ {
12097+ unsigned long addr;
12098+
12099+ addr = (sethi & 0x003FFFFFU) << 10;
12100+ regs->u_regs[UREG_G1] = addr;
12101+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12102+
12103+ if (test_thread_flag(TIF_32BIT))
12104+ addr &= 0xFFFFFFFFUL;
12105+
12106+ regs->tpc = addr;
12107+ regs->tnpc = addr+4;
12108+ return 2;
12109+ }
12110+ } while (0);
12111+
12112+#endif
12113+
12114+ return 1;
12115+}
12116+
12117+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12118+{
12119+ unsigned long i;
12120+
12121+ printk(KERN_ERR "PAX: bytes at PC: ");
12122+ for (i = 0; i < 8; i++) {
12123+ unsigned int c;
12124+ if (get_user(c, (unsigned int *)pc+i))
12125+ printk(KERN_CONT "???????? ");
12126+ else
12127+ printk(KERN_CONT "%08x ", c);
12128+ }
12129+ printk("\n");
12130+}
12131+#endif
12132+
12133 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12134 {
12135 enum ctx_state prev_state = exception_enter();
12136@@ -353,6 +816,29 @@ retry:
12137 if (!vma)
12138 goto bad_area;
12139
12140+#ifdef CONFIG_PAX_PAGEEXEC
12141+ /* PaX: detect ITLB misses on non-exec pages */
12142+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12143+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12144+ {
12145+ if (address != regs->tpc)
12146+ goto good_area;
12147+
12148+ up_read(&mm->mmap_sem);
12149+ switch (pax_handle_fetch_fault(regs)) {
12150+
12151+#ifdef CONFIG_PAX_EMUPLT
12152+ case 2:
12153+ case 3:
12154+ return;
12155+#endif
12156+
12157+ }
12158+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12159+ do_group_exit(SIGKILL);
12160+ }
12161+#endif
12162+
12163 /* Pure DTLB misses do not tell us whether the fault causing
12164 * load/store/atomic was a write or not, it only says that there
12165 * was no match. So in such a case we (carefully) read the
12166diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12167index 4242eab..9ae6360 100644
12168--- a/arch/sparc/mm/hugetlbpage.c
12169+++ b/arch/sparc/mm/hugetlbpage.c
12170@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12171 unsigned long addr,
12172 unsigned long len,
12173 unsigned long pgoff,
12174- unsigned long flags)
12175+ unsigned long flags,
12176+ unsigned long offset)
12177 {
12178+ struct mm_struct *mm = current->mm;
12179 unsigned long task_size = TASK_SIZE;
12180 struct vm_unmapped_area_info info;
12181
12182@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12183
12184 info.flags = 0;
12185 info.length = len;
12186- info.low_limit = TASK_UNMAPPED_BASE;
12187+ info.low_limit = mm->mmap_base;
12188 info.high_limit = min(task_size, VA_EXCLUDE_START);
12189 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12190 info.align_offset = 0;
12191+ info.threadstack_offset = offset;
12192 addr = vm_unmapped_area(&info);
12193
12194 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12195 VM_BUG_ON(addr != -ENOMEM);
12196 info.low_limit = VA_EXCLUDE_END;
12197+
12198+#ifdef CONFIG_PAX_RANDMMAP
12199+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12200+ info.low_limit += mm->delta_mmap;
12201+#endif
12202+
12203 info.high_limit = task_size;
12204 addr = vm_unmapped_area(&info);
12205 }
12206@@ -55,7 +64,8 @@ static unsigned long
12207 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12208 const unsigned long len,
12209 const unsigned long pgoff,
12210- const unsigned long flags)
12211+ const unsigned long flags,
12212+ const unsigned long offset)
12213 {
12214 struct mm_struct *mm = current->mm;
12215 unsigned long addr = addr0;
12216@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12217 info.high_limit = mm->mmap_base;
12218 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12219 info.align_offset = 0;
12220+ info.threadstack_offset = offset;
12221 addr = vm_unmapped_area(&info);
12222
12223 /*
12224@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12225 VM_BUG_ON(addr != -ENOMEM);
12226 info.flags = 0;
12227 info.low_limit = TASK_UNMAPPED_BASE;
12228+
12229+#ifdef CONFIG_PAX_RANDMMAP
12230+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12231+ info.low_limit += mm->delta_mmap;
12232+#endif
12233+
12234 info.high_limit = STACK_TOP32;
12235 addr = vm_unmapped_area(&info);
12236 }
12237@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12238 struct mm_struct *mm = current->mm;
12239 struct vm_area_struct *vma;
12240 unsigned long task_size = TASK_SIZE;
12241+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12242
12243 if (test_thread_flag(TIF_32BIT))
12244 task_size = STACK_TOP32;
12245@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12246 return addr;
12247 }
12248
12249+#ifdef CONFIG_PAX_RANDMMAP
12250+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12251+#endif
12252+
12253 if (addr) {
12254 addr = ALIGN(addr, HPAGE_SIZE);
12255 vma = find_vma(mm, addr);
12256- if (task_size - len >= addr &&
12257- (!vma || addr + len <= vma->vm_start))
12258+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12259 return addr;
12260 }
12261 if (mm->get_unmapped_area == arch_get_unmapped_area)
12262 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12263- pgoff, flags);
12264+ pgoff, flags, offset);
12265 else
12266 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12267- pgoff, flags);
12268+ pgoff, flags, offset);
12269 }
12270
12271 pte_t *huge_pte_alloc(struct mm_struct *mm,
12272diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12273index 4ca0d6b..e89bca1 100644
12274--- a/arch/sparc/mm/init_64.c
12275+++ b/arch/sparc/mm/init_64.c
12276@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12277 int num_kernel_image_mappings;
12278
12279 #ifdef CONFIG_DEBUG_DCFLUSH
12280-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12281+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12282 #ifdef CONFIG_SMP
12283-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12284+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12285 #endif
12286 #endif
12287
12288@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
12289 {
12290 BUG_ON(tlb_type == hypervisor);
12291 #ifdef CONFIG_DEBUG_DCFLUSH
12292- atomic_inc(&dcpage_flushes);
12293+ atomic_inc_unchecked(&dcpage_flushes);
12294 #endif
12295
12296 #ifdef DCACHE_ALIASING_POSSIBLE
12297@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
12298
12299 #ifdef CONFIG_DEBUG_DCFLUSH
12300 seq_printf(m, "DCPageFlushes\t: %d\n",
12301- atomic_read(&dcpage_flushes));
12302+ atomic_read_unchecked(&dcpage_flushes));
12303 #ifdef CONFIG_SMP
12304 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12305- atomic_read(&dcpage_flushes_xcall));
12306+ atomic_read_unchecked(&dcpage_flushes_xcall));
12307 #endif /* CONFIG_SMP */
12308 #endif /* CONFIG_DEBUG_DCFLUSH */
12309 }
12310diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
12311index 7cca418..53fc030 100644
12312--- a/arch/tile/Kconfig
12313+++ b/arch/tile/Kconfig
12314@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
12315
12316 config KEXEC
12317 bool "kexec system call"
12318+ depends on !GRKERNSEC_KMEM
12319 ---help---
12320 kexec is a system call that implements the ability to shutdown your
12321 current kernel, and to start another kernel. It is like a reboot
12322diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
12323index 7b11c5f..755a026 100644
12324--- a/arch/tile/include/asm/atomic_64.h
12325+++ b/arch/tile/include/asm/atomic_64.h
12326@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
12327
12328 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12329
12330+#define atomic64_read_unchecked(v) atomic64_read(v)
12331+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
12332+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
12333+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
12334+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
12335+#define atomic64_inc_unchecked(v) atomic64_inc(v)
12336+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
12337+#define atomic64_dec_unchecked(v) atomic64_dec(v)
12338+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
12339+
12340 /* Define this to indicate that cmpxchg is an efficient operation. */
12341 #define __HAVE_ARCH_CMPXCHG
12342
12343diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12344index 6160761..00cac88 100644
12345--- a/arch/tile/include/asm/cache.h
12346+++ b/arch/tile/include/asm/cache.h
12347@@ -15,11 +15,12 @@
12348 #ifndef _ASM_TILE_CACHE_H
12349 #define _ASM_TILE_CACHE_H
12350
12351+#include <linux/const.h>
12352 #include <arch/chip.h>
12353
12354 /* bytes per L1 data cache line */
12355 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12356-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12357+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12358
12359 /* bytes per L2 cache line */
12360 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
12361diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12362index f41cb53..31d3ab4 100644
12363--- a/arch/tile/include/asm/uaccess.h
12364+++ b/arch/tile/include/asm/uaccess.h
12365@@ -417,9 +417,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12366 const void __user *from,
12367 unsigned long n)
12368 {
12369- int sz = __compiletime_object_size(to);
12370+ size_t sz = __compiletime_object_size(to);
12371
12372- if (likely(sz == -1 || sz >= n))
12373+ if (likely(sz == (size_t)-1 || sz >= n))
12374 n = _copy_from_user(to, from, n);
12375 else
12376 copy_from_user_overflow();
12377diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12378index 8416240..a012fb7 100644
12379--- a/arch/tile/mm/hugetlbpage.c
12380+++ b/arch/tile/mm/hugetlbpage.c
12381@@ -179,6 +179,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12382 info.high_limit = TASK_SIZE;
12383 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12384 info.align_offset = 0;
12385+ info.threadstack_offset = 0;
12386 return vm_unmapped_area(&info);
12387 }
12388
12389@@ -196,6 +197,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12390 info.high_limit = current->mm->mmap_base;
12391 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12392 info.align_offset = 0;
12393+ info.threadstack_offset = 0;
12394 addr = vm_unmapped_area(&info);
12395
12396 /*
12397diff --git a/arch/um/Makefile b/arch/um/Makefile
12398index e4b1a96..16162f8 100644
12399--- a/arch/um/Makefile
12400+++ b/arch/um/Makefile
12401@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12402 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12403 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12404
12405+ifdef CONSTIFY_PLUGIN
12406+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12407+endif
12408+
12409 #This will adjust *FLAGS accordingly to the platform.
12410 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
12411
12412diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12413index 19e1bdd..3665b77 100644
12414--- a/arch/um/include/asm/cache.h
12415+++ b/arch/um/include/asm/cache.h
12416@@ -1,6 +1,7 @@
12417 #ifndef __UM_CACHE_H
12418 #define __UM_CACHE_H
12419
12420+#include <linux/const.h>
12421
12422 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12423 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12424@@ -12,6 +13,6 @@
12425 # define L1_CACHE_SHIFT 5
12426 #endif
12427
12428-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12429+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12430
12431 #endif
12432diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12433index 2e0a6b1..a64d0f5 100644
12434--- a/arch/um/include/asm/kmap_types.h
12435+++ b/arch/um/include/asm/kmap_types.h
12436@@ -8,6 +8,6 @@
12437
12438 /* No more #include "asm/arch/kmap_types.h" ! */
12439
12440-#define KM_TYPE_NR 14
12441+#define KM_TYPE_NR 15
12442
12443 #endif
12444diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12445index 71c5d13..4c7b9f1 100644
12446--- a/arch/um/include/asm/page.h
12447+++ b/arch/um/include/asm/page.h
12448@@ -14,6 +14,9 @@
12449 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12450 #define PAGE_MASK (~(PAGE_SIZE-1))
12451
12452+#define ktla_ktva(addr) (addr)
12453+#define ktva_ktla(addr) (addr)
12454+
12455 #ifndef __ASSEMBLY__
12456
12457 struct page;
12458diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12459index 2b4274e..754fe06 100644
12460--- a/arch/um/include/asm/pgtable-3level.h
12461+++ b/arch/um/include/asm/pgtable-3level.h
12462@@ -58,6 +58,7 @@
12463 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12464 #define pud_populate(mm, pud, pmd) \
12465 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12466+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12467
12468 #ifdef CONFIG_64BIT
12469 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12470diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12471index f17bca8..48adb87 100644
12472--- a/arch/um/kernel/process.c
12473+++ b/arch/um/kernel/process.c
12474@@ -356,22 +356,6 @@ int singlestepping(void * t)
12475 return 2;
12476 }
12477
12478-/*
12479- * Only x86 and x86_64 have an arch_align_stack().
12480- * All other arches have "#define arch_align_stack(x) (x)"
12481- * in their asm/exec.h
12482- * As this is included in UML from asm-um/system-generic.h,
12483- * we can use it to behave as the subarch does.
12484- */
12485-#ifndef arch_align_stack
12486-unsigned long arch_align_stack(unsigned long sp)
12487-{
12488- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12489- sp -= get_random_int() % 8192;
12490- return sp & ~0xf;
12491-}
12492-#endif
12493-
12494 unsigned long get_wchan(struct task_struct *p)
12495 {
12496 unsigned long stack_page, sp, ip;
12497diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12498index ad8f795..2c7eec6 100644
12499--- a/arch/unicore32/include/asm/cache.h
12500+++ b/arch/unicore32/include/asm/cache.h
12501@@ -12,8 +12,10 @@
12502 #ifndef __UNICORE_CACHE_H__
12503 #define __UNICORE_CACHE_H__
12504
12505-#define L1_CACHE_SHIFT (5)
12506-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12507+#include <linux/const.h>
12508+
12509+#define L1_CACHE_SHIFT 5
12510+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12511
12512 /*
12513 * Memory returned by kmalloc() may be used for DMA, so we must make
12514diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12515index 570c71d..992da93 100644
12516--- a/arch/x86/Kconfig
12517+++ b/arch/x86/Kconfig
12518@@ -132,7 +132,7 @@ config X86
12519 select RTC_LIB
12520 select HAVE_DEBUG_STACKOVERFLOW
12521 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
12522- select HAVE_CC_STACKPROTECTOR
12523+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
12524 select GENERIC_CPU_AUTOPROBE
12525 select HAVE_ARCH_AUDITSYSCALL
12526 select ARCH_SUPPORTS_ATOMIC_RMW
12527@@ -266,7 +266,7 @@ config X86_HT
12528
12529 config X86_32_LAZY_GS
12530 def_bool y
12531- depends on X86_32 && !CC_STACKPROTECTOR
12532+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12533
12534 config ARCH_HWEIGHT_CFLAGS
12535 string
12536@@ -632,6 +632,7 @@ config SCHED_OMIT_FRAME_POINTER
12537
12538 menuconfig HYPERVISOR_GUEST
12539 bool "Linux guest support"
12540+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12541 ---help---
12542 Say Y here to enable options for running Linux under various hyper-
12543 visors. This option enables basic hypervisor detection and platform
12544@@ -1013,6 +1014,7 @@ config VM86
12545
12546 config X86_16BIT
12547 bool "Enable support for 16-bit segments" if EXPERT
12548+ depends on !GRKERNSEC
12549 default y
12550 ---help---
12551 This option is required by programs like Wine to run 16-bit
12552@@ -1186,6 +1188,7 @@ choice
12553
12554 config NOHIGHMEM
12555 bool "off"
12556+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12557 ---help---
12558 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12559 However, the address space of 32-bit x86 processors is only 4
12560@@ -1222,6 +1225,7 @@ config NOHIGHMEM
12561
12562 config HIGHMEM4G
12563 bool "4GB"
12564+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12565 ---help---
12566 Select this if you have a 32-bit processor and between 1 and 4
12567 gigabytes of physical RAM.
12568@@ -1274,7 +1278,7 @@ config PAGE_OFFSET
12569 hex
12570 default 0xB0000000 if VMSPLIT_3G_OPT
12571 default 0x80000000 if VMSPLIT_2G
12572- default 0x78000000 if VMSPLIT_2G_OPT
12573+ default 0x70000000 if VMSPLIT_2G_OPT
12574 default 0x40000000 if VMSPLIT_1G
12575 default 0xC0000000
12576 depends on X86_32
12577@@ -1715,6 +1719,7 @@ source kernel/Kconfig.hz
12578
12579 config KEXEC
12580 bool "kexec system call"
12581+ depends on !GRKERNSEC_KMEM
12582 ---help---
12583 kexec is a system call that implements the ability to shutdown your
12584 current kernel, and to start another kernel. It is like a reboot
12585@@ -1900,7 +1905,9 @@ config X86_NEED_RELOCS
12586
12587 config PHYSICAL_ALIGN
12588 hex "Alignment value to which kernel should be aligned"
12589- default "0x200000"
12590+ default "0x1000000"
12591+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12592+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12593 range 0x2000 0x1000000 if X86_32
12594 range 0x200000 0x1000000 if X86_64
12595 ---help---
12596@@ -1983,6 +1990,7 @@ config COMPAT_VDSO
12597 def_bool n
12598 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
12599 depends on X86_32 || IA32_EMULATION
12600+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12601 ---help---
12602 Certain buggy versions of glibc will crash if they are
12603 presented with a 32-bit vDSO that is not mapped at the address
12604diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12605index 6983314..54ad7e8 100644
12606--- a/arch/x86/Kconfig.cpu
12607+++ b/arch/x86/Kconfig.cpu
12608@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12609
12610 config X86_F00F_BUG
12611 def_bool y
12612- depends on M586MMX || M586TSC || M586 || M486
12613+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12614
12615 config X86_INVD_BUG
12616 def_bool y
12617@@ -327,7 +327,7 @@ config X86_INVD_BUG
12618
12619 config X86_ALIGNMENT_16
12620 def_bool y
12621- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12622+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12623
12624 config X86_INTEL_USERCOPY
12625 def_bool y
12626@@ -369,7 +369,7 @@ config X86_CMPXCHG64
12627 # generates cmov.
12628 config X86_CMOV
12629 def_bool y
12630- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12631+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12632
12633 config X86_MINIMUM_CPU_FAMILY
12634 int
12635diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12636index 20028da..88d5946 100644
12637--- a/arch/x86/Kconfig.debug
12638+++ b/arch/x86/Kconfig.debug
12639@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
12640 config DEBUG_RODATA
12641 bool "Write protect kernel read-only data structures"
12642 default y
12643- depends on DEBUG_KERNEL
12644+ depends on DEBUG_KERNEL && BROKEN
12645 ---help---
12646 Mark the kernel read-only data as write-protected in the pagetables,
12647 in order to catch accidental (and incorrect) writes to such const
12648@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
12649
12650 config DEBUG_SET_MODULE_RONX
12651 bool "Set loadable kernel module data as NX and text as RO"
12652- depends on MODULES
12653+ depends on MODULES && BROKEN
12654 ---help---
12655 This option helps catch unintended modifications to loadable
12656 kernel module's text and read-only data. It also prevents execution
12657diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12658index 5ba2d9c..41e5bb6 100644
12659--- a/arch/x86/Makefile
12660+++ b/arch/x86/Makefile
12661@@ -65,9 +65,6 @@ ifeq ($(CONFIG_X86_32),y)
12662 # CPU-specific tuning. Anything which can be shared with UML should go here.
12663 include $(srctree)/arch/x86/Makefile_32.cpu
12664 KBUILD_CFLAGS += $(cflags-y)
12665-
12666- # temporary until string.h is fixed
12667- KBUILD_CFLAGS += -ffreestanding
12668 else
12669 BITS := 64
12670 UTS_MACHINE := x86_64
12671@@ -107,6 +104,9 @@ else
12672 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
12673 endif
12674
12675+# temporary until string.h is fixed
12676+KBUILD_CFLAGS += -ffreestanding
12677+
12678 # Make sure compiler does not have buggy stack-protector support.
12679 ifdef CONFIG_CC_STACKPROTECTOR
12680 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12681@@ -181,6 +181,7 @@ archheaders:
12682 $(Q)$(MAKE) $(build)=arch/x86/syscalls all
12683
12684 archprepare:
12685+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12686 ifeq ($(CONFIG_KEXEC_FILE),y)
12687 $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
12688 endif
12689@@ -264,3 +265,9 @@ define archhelp
12690 echo ' FDARGS="..." arguments for the booted kernel'
12691 echo ' FDINITRD=file initrd for the booted kernel'
12692 endef
12693+
12694+define OLD_LD
12695+
12696+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12697+*** Please upgrade your binutils to 2.18 or newer
12698+endef
12699diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12700index 57bbf2f..b100fce 100644
12701--- a/arch/x86/boot/Makefile
12702+++ b/arch/x86/boot/Makefile
12703@@ -58,6 +58,9 @@ clean-files += cpustr.h
12704 # ---------------------------------------------------------------------------
12705
12706 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
12707+ifdef CONSTIFY_PLUGIN
12708+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12709+endif
12710 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12711 GCOV_PROFILE := n
12712
12713diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12714index 878e4b9..20537ab 100644
12715--- a/arch/x86/boot/bitops.h
12716+++ b/arch/x86/boot/bitops.h
12717@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12718 u8 v;
12719 const u32 *p = (const u32 *)addr;
12720
12721- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12722+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12723 return v;
12724 }
12725
12726@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12727
12728 static inline void set_bit(int nr, void *addr)
12729 {
12730- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12731+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12732 }
12733
12734 #endif /* BOOT_BITOPS_H */
12735diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12736index bd49ec6..94c7f58 100644
12737--- a/arch/x86/boot/boot.h
12738+++ b/arch/x86/boot/boot.h
12739@@ -84,7 +84,7 @@ static inline void io_delay(void)
12740 static inline u16 ds(void)
12741 {
12742 u16 seg;
12743- asm("movw %%ds,%0" : "=rm" (seg));
12744+ asm volatile("movw %%ds,%0" : "=rm" (seg));
12745 return seg;
12746 }
12747
12748diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
12749index 0a291cd..9686efc 100644
12750--- a/arch/x86/boot/compressed/Makefile
12751+++ b/arch/x86/boot/compressed/Makefile
12752@@ -30,6 +30,9 @@ KBUILD_CFLAGS += $(cflags-y)
12753 KBUILD_CFLAGS += -mno-mmx -mno-sse
12754 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
12755 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
12756+ifdef CONSTIFY_PLUGIN
12757+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12758+endif
12759
12760 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12761 GCOV_PROFILE := n
12762diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
12763index a53440e..c3dbf1e 100644
12764--- a/arch/x86/boot/compressed/efi_stub_32.S
12765+++ b/arch/x86/boot/compressed/efi_stub_32.S
12766@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
12767 * parameter 2, ..., param n. To make things easy, we save the return
12768 * address of efi_call_phys in a global variable.
12769 */
12770- popl %ecx
12771- movl %ecx, saved_return_addr(%edx)
12772- /* get the function pointer into ECX*/
12773- popl %ecx
12774- movl %ecx, efi_rt_function_ptr(%edx)
12775+ popl saved_return_addr(%edx)
12776+ popl efi_rt_function_ptr(%edx)
12777
12778 /*
12779 * 3. Call the physical function.
12780 */
12781- call *%ecx
12782+ call *efi_rt_function_ptr(%edx)
12783
12784 /*
12785 * 4. Balance the stack. And because EAX contain the return value,
12786@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
12787 1: popl %edx
12788 subl $1b, %edx
12789
12790- movl efi_rt_function_ptr(%edx), %ecx
12791- pushl %ecx
12792+ pushl efi_rt_function_ptr(%edx)
12793
12794 /*
12795 * 10. Push the saved return address onto the stack and return.
12796 */
12797- movl saved_return_addr(%edx), %ecx
12798- pushl %ecx
12799- ret
12800+ jmpl *saved_return_addr(%edx)
12801 ENDPROC(efi_call_phys)
12802 .previous
12803
12804diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
12805index 630384a..278e788 100644
12806--- a/arch/x86/boot/compressed/efi_thunk_64.S
12807+++ b/arch/x86/boot/compressed/efi_thunk_64.S
12808@@ -189,8 +189,8 @@ efi_gdt64:
12809 .long 0 /* Filled out by user */
12810 .word 0
12811 .quad 0x0000000000000000 /* NULL descriptor */
12812- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12813- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12814+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12815+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12816 .quad 0x0080890000000000 /* TS descriptor */
12817 .quad 0x0000000000000000 /* TS continued */
12818 efi_gdt64_end:
12819diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
12820index 1d7fbbc..36ecd58 100644
12821--- a/arch/x86/boot/compressed/head_32.S
12822+++ b/arch/x86/boot/compressed/head_32.S
12823@@ -140,10 +140,10 @@ preferred_addr:
12824 addl %eax, %ebx
12825 notl %eax
12826 andl %eax, %ebx
12827- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12828+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12829 jge 1f
12830 #endif
12831- movl $LOAD_PHYSICAL_ADDR, %ebx
12832+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12833 1:
12834
12835 /* Target address to relocate to for decompression */
12836diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
12837index 6b1766c..ad465c9 100644
12838--- a/arch/x86/boot/compressed/head_64.S
12839+++ b/arch/x86/boot/compressed/head_64.S
12840@@ -94,10 +94,10 @@ ENTRY(startup_32)
12841 addl %eax, %ebx
12842 notl %eax
12843 andl %eax, %ebx
12844- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12845+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12846 jge 1f
12847 #endif
12848- movl $LOAD_PHYSICAL_ADDR, %ebx
12849+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12850 1:
12851
12852 /* Target address to relocate to for decompression */
12853@@ -322,10 +322,10 @@ preferred_addr:
12854 addq %rax, %rbp
12855 notq %rax
12856 andq %rax, %rbp
12857- cmpq $LOAD_PHYSICAL_ADDR, %rbp
12858+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
12859 jge 1f
12860 #endif
12861- movq $LOAD_PHYSICAL_ADDR, %rbp
12862+ movq $____LOAD_PHYSICAL_ADDR, %rbp
12863 1:
12864
12865 /* Target address to relocate to for decompression */
12866@@ -434,8 +434,8 @@ gdt:
12867 .long gdt
12868 .word 0
12869 .quad 0x0000000000000000 /* NULL descriptor */
12870- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12871- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12872+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12873+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12874 .quad 0x0080890000000000 /* TS descriptor */
12875 .quad 0x0000000000000000 /* TS continued */
12876 gdt_end:
12877diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
12878index a950864..c710239 100644
12879--- a/arch/x86/boot/compressed/misc.c
12880+++ b/arch/x86/boot/compressed/misc.c
12881@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
12882 * Calculate the delta between where vmlinux was linked to load
12883 * and where it was actually loaded.
12884 */
12885- delta = min_addr - LOAD_PHYSICAL_ADDR;
12886+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
12887 if (!delta) {
12888 debug_putstr("No relocation needed... ");
12889 return;
12890@@ -324,7 +324,7 @@ static void parse_elf(void *output)
12891 Elf32_Ehdr ehdr;
12892 Elf32_Phdr *phdrs, *phdr;
12893 #endif
12894- void *dest;
12895+ void *dest, *prev;
12896 int i;
12897
12898 memcpy(&ehdr, output, sizeof(ehdr));
12899@@ -351,13 +351,16 @@ static void parse_elf(void *output)
12900 case PT_LOAD:
12901 #ifdef CONFIG_RELOCATABLE
12902 dest = output;
12903- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
12904+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
12905 #else
12906 dest = (void *)(phdr->p_paddr);
12907 #endif
12908 memcpy(dest,
12909 output + phdr->p_offset,
12910 phdr->p_filesz);
12911+ if (i)
12912+ memset(prev, 0xff, dest - prev);
12913+ prev = dest + phdr->p_filesz;
12914 break;
12915 default: /* Ignore other PT_* */ break;
12916 }
12917@@ -416,7 +419,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
12918 error("Destination address too large");
12919 #endif
12920 #ifndef CONFIG_RELOCATABLE
12921- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
12922+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
12923 error("Wrong destination address");
12924 #endif
12925
12926diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
12927index 1fd7d57..0f7d096 100644
12928--- a/arch/x86/boot/cpucheck.c
12929+++ b/arch/x86/boot/cpucheck.c
12930@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12931 u32 ecx = MSR_K7_HWCR;
12932 u32 eax, edx;
12933
12934- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12935+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12936 eax &= ~(1 << 15);
12937- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12938+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12939
12940 get_cpuflags(); /* Make sure it really did something */
12941 err = check_cpuflags();
12942@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12943 u32 ecx = MSR_VIA_FCR;
12944 u32 eax, edx;
12945
12946- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12947+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12948 eax |= (1<<1)|(1<<7);
12949- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12950+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12951
12952 set_bit(X86_FEATURE_CX8, cpu.flags);
12953 err = check_cpuflags();
12954@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12955 u32 eax, edx;
12956 u32 level = 1;
12957
12958- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12959- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12960- asm("cpuid"
12961+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12962+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12963+ asm volatile("cpuid"
12964 : "+a" (level), "=d" (cpu.flags[0])
12965 : : "ecx", "ebx");
12966- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12967+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12968
12969 err = check_cpuflags();
12970 } else if (err == 0x01 &&
12971diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
12972index 16ef025..91e033b 100644
12973--- a/arch/x86/boot/header.S
12974+++ b/arch/x86/boot/header.S
12975@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
12976 # single linked list of
12977 # struct setup_data
12978
12979-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
12980+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
12981
12982 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
12983+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
12984+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
12985+#else
12986 #define VO_INIT_SIZE (VO__end - VO__text)
12987+#endif
12988 #if ZO_INIT_SIZE > VO_INIT_SIZE
12989 #define INIT_SIZE ZO_INIT_SIZE
12990 #else
12991diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
12992index db75d07..8e6d0af 100644
12993--- a/arch/x86/boot/memory.c
12994+++ b/arch/x86/boot/memory.c
12995@@ -19,7 +19,7 @@
12996
12997 static int detect_memory_e820(void)
12998 {
12999- int count = 0;
13000+ unsigned int count = 0;
13001 struct biosregs ireg, oreg;
13002 struct e820entry *desc = boot_params.e820_map;
13003 static struct e820entry buf; /* static so it is zeroed */
13004diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
13005index ba3e100..6501b8f 100644
13006--- a/arch/x86/boot/video-vesa.c
13007+++ b/arch/x86/boot/video-vesa.c
13008@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
13009
13010 boot_params.screen_info.vesapm_seg = oreg.es;
13011 boot_params.screen_info.vesapm_off = oreg.di;
13012+ boot_params.screen_info.vesapm_size = oreg.cx;
13013 }
13014
13015 /*
13016diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13017index 43eda28..5ab5fdb 100644
13018--- a/arch/x86/boot/video.c
13019+++ b/arch/x86/boot/video.c
13020@@ -96,7 +96,7 @@ static void store_mode_params(void)
13021 static unsigned int get_entry(void)
13022 {
13023 char entry_buf[4];
13024- int i, len = 0;
13025+ unsigned int i, len = 0;
13026 int key;
13027 unsigned int v;
13028
13029diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13030index 9105655..41779c1 100644
13031--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13032+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13033@@ -8,6 +8,8 @@
13034 * including this sentence is retained in full.
13035 */
13036
13037+#include <asm/alternative-asm.h>
13038+
13039 .extern crypto_ft_tab
13040 .extern crypto_it_tab
13041 .extern crypto_fl_tab
13042@@ -70,6 +72,8 @@
13043 je B192; \
13044 leaq 32(r9),r9;
13045
13046+#define ret pax_force_retaddr; ret
13047+
13048 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13049 movq r1,r2; \
13050 movq r3,r4; \
13051diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13052index 6bd2c6c..368c93e 100644
13053--- a/arch/x86/crypto/aesni-intel_asm.S
13054+++ b/arch/x86/crypto/aesni-intel_asm.S
13055@@ -31,6 +31,7 @@
13056
13057 #include <linux/linkage.h>
13058 #include <asm/inst.h>
13059+#include <asm/alternative-asm.h>
13060
13061 /*
13062 * The following macros are used to move an (un)aligned 16 byte value to/from
13063@@ -217,7 +218,7 @@ enc: .octa 0x2
13064 * num_initial_blocks = b mod 4
13065 * encrypt the initial num_initial_blocks blocks and apply ghash on
13066 * the ciphertext
13067-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13068+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13069 * are clobbered
13070 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13071 */
13072@@ -227,8 +228,8 @@ enc: .octa 0x2
13073 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13074 MOVADQ SHUF_MASK(%rip), %xmm14
13075 mov arg7, %r10 # %r10 = AAD
13076- mov arg8, %r12 # %r12 = aadLen
13077- mov %r12, %r11
13078+ mov arg8, %r15 # %r15 = aadLen
13079+ mov %r15, %r11
13080 pxor %xmm\i, %xmm\i
13081
13082 _get_AAD_loop\num_initial_blocks\operation:
13083@@ -237,17 +238,17 @@ _get_AAD_loop\num_initial_blocks\operation:
13084 psrldq $4, %xmm\i
13085 pxor \TMP1, %xmm\i
13086 add $4, %r10
13087- sub $4, %r12
13088+ sub $4, %r15
13089 jne _get_AAD_loop\num_initial_blocks\operation
13090
13091 cmp $16, %r11
13092 je _get_AAD_loop2_done\num_initial_blocks\operation
13093
13094- mov $16, %r12
13095+ mov $16, %r15
13096 _get_AAD_loop2\num_initial_blocks\operation:
13097 psrldq $4, %xmm\i
13098- sub $4, %r12
13099- cmp %r11, %r12
13100+ sub $4, %r15
13101+ cmp %r11, %r15
13102 jne _get_AAD_loop2\num_initial_blocks\operation
13103
13104 _get_AAD_loop2_done\num_initial_blocks\operation:
13105@@ -442,7 +443,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13106 * num_initial_blocks = b mod 4
13107 * encrypt the initial num_initial_blocks blocks and apply ghash on
13108 * the ciphertext
13109-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13110+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13111 * are clobbered
13112 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13113 */
13114@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13115 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13116 MOVADQ SHUF_MASK(%rip), %xmm14
13117 mov arg7, %r10 # %r10 = AAD
13118- mov arg8, %r12 # %r12 = aadLen
13119- mov %r12, %r11
13120+ mov arg8, %r15 # %r15 = aadLen
13121+ mov %r15, %r11
13122 pxor %xmm\i, %xmm\i
13123 _get_AAD_loop\num_initial_blocks\operation:
13124 movd (%r10), \TMP1
13125@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13126 psrldq $4, %xmm\i
13127 pxor \TMP1, %xmm\i
13128 add $4, %r10
13129- sub $4, %r12
13130+ sub $4, %r15
13131 jne _get_AAD_loop\num_initial_blocks\operation
13132 cmp $16, %r11
13133 je _get_AAD_loop2_done\num_initial_blocks\operation
13134- mov $16, %r12
13135+ mov $16, %r15
13136 _get_AAD_loop2\num_initial_blocks\operation:
13137 psrldq $4, %xmm\i
13138- sub $4, %r12
13139- cmp %r11, %r12
13140+ sub $4, %r15
13141+ cmp %r11, %r15
13142 jne _get_AAD_loop2\num_initial_blocks\operation
13143 _get_AAD_loop2_done\num_initial_blocks\operation:
13144 PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
13145@@ -1280,7 +1281,7 @@ _esb_loop_\@:
13146 *
13147 *****************************************************************************/
13148 ENTRY(aesni_gcm_dec)
13149- push %r12
13150+ push %r15
13151 push %r13
13152 push %r14
13153 mov %rsp, %r14
13154@@ -1290,8 +1291,8 @@ ENTRY(aesni_gcm_dec)
13155 */
13156 sub $VARIABLE_OFFSET, %rsp
13157 and $~63, %rsp # align rsp to 64 bytes
13158- mov %arg6, %r12
13159- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13160+ mov %arg6, %r15
13161+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13162 movdqa SHUF_MASK(%rip), %xmm2
13163 PSHUFB_XMM %xmm2, %xmm13
13164
13165@@ -1319,10 +1320,10 @@ ENTRY(aesni_gcm_dec)
13166 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13167 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13168 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13169- mov %r13, %r12
13170- and $(3<<4), %r12
13171+ mov %r13, %r15
13172+ and $(3<<4), %r15
13173 jz _initial_num_blocks_is_0_decrypt
13174- cmp $(2<<4), %r12
13175+ cmp $(2<<4), %r15
13176 jb _initial_num_blocks_is_1_decrypt
13177 je _initial_num_blocks_is_2_decrypt
13178 _initial_num_blocks_is_3_decrypt:
13179@@ -1372,16 +1373,16 @@ _zero_cipher_left_decrypt:
13180 sub $16, %r11
13181 add %r13, %r11
13182 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13183- lea SHIFT_MASK+16(%rip), %r12
13184- sub %r13, %r12
13185+ lea SHIFT_MASK+16(%rip), %r15
13186+ sub %r13, %r15
13187 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13188 # (%r13 is the number of bytes in plaintext mod 16)
13189- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13190+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13191 PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
13192
13193 movdqa %xmm1, %xmm2
13194 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13195- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13196+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13197 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13198 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13199 pand %xmm1, %xmm2
13200@@ -1410,9 +1411,9 @@ _less_than_8_bytes_left_decrypt:
13201 sub $1, %r13
13202 jne _less_than_8_bytes_left_decrypt
13203 _multiple_of_16_bytes_decrypt:
13204- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13205- shl $3, %r12 # convert into number of bits
13206- movd %r12d, %xmm15 # len(A) in %xmm15
13207+ mov arg8, %r15 # %r13 = aadLen (number of bytes)
13208+ shl $3, %r15 # convert into number of bits
13209+ movd %r15d, %xmm15 # len(A) in %xmm15
13210 shl $3, %arg4 # len(C) in bits (*128)
13211 MOVQ_R64_XMM %arg4, %xmm1
13212 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13213@@ -1451,7 +1452,8 @@ _return_T_done_decrypt:
13214 mov %r14, %rsp
13215 pop %r14
13216 pop %r13
13217- pop %r12
13218+ pop %r15
13219+ pax_force_retaddr
13220 ret
13221 ENDPROC(aesni_gcm_dec)
13222
13223@@ -1540,7 +1542,7 @@ ENDPROC(aesni_gcm_dec)
13224 * poly = x^128 + x^127 + x^126 + x^121 + 1
13225 ***************************************************************************/
13226 ENTRY(aesni_gcm_enc)
13227- push %r12
13228+ push %r15
13229 push %r13
13230 push %r14
13231 mov %rsp, %r14
13232@@ -1550,8 +1552,8 @@ ENTRY(aesni_gcm_enc)
13233 #
13234 sub $VARIABLE_OFFSET, %rsp
13235 and $~63, %rsp
13236- mov %arg6, %r12
13237- movdqu (%r12), %xmm13
13238+ mov %arg6, %r15
13239+ movdqu (%r15), %xmm13
13240 movdqa SHUF_MASK(%rip), %xmm2
13241 PSHUFB_XMM %xmm2, %xmm13
13242
13243@@ -1575,13 +1577,13 @@ ENTRY(aesni_gcm_enc)
13244 movdqa %xmm13, HashKey(%rsp)
13245 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13246 and $-16, %r13
13247- mov %r13, %r12
13248+ mov %r13, %r15
13249
13250 # Encrypt first few blocks
13251
13252- and $(3<<4), %r12
13253+ and $(3<<4), %r15
13254 jz _initial_num_blocks_is_0_encrypt
13255- cmp $(2<<4), %r12
13256+ cmp $(2<<4), %r15
13257 jb _initial_num_blocks_is_1_encrypt
13258 je _initial_num_blocks_is_2_encrypt
13259 _initial_num_blocks_is_3_encrypt:
13260@@ -1634,14 +1636,14 @@ _zero_cipher_left_encrypt:
13261 sub $16, %r11
13262 add %r13, %r11
13263 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13264- lea SHIFT_MASK+16(%rip), %r12
13265- sub %r13, %r12
13266+ lea SHIFT_MASK+16(%rip), %r15
13267+ sub %r13, %r15
13268 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13269 # (%r13 is the number of bytes in plaintext mod 16)
13270- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13271+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13272 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
13273 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13274- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13275+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13276 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13277 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13278 movdqa SHUF_MASK(%rip), %xmm10
13279@@ -1674,9 +1676,9 @@ _less_than_8_bytes_left_encrypt:
13280 sub $1, %r13
13281 jne _less_than_8_bytes_left_encrypt
13282 _multiple_of_16_bytes_encrypt:
13283- mov arg8, %r12 # %r12 = addLen (number of bytes)
13284- shl $3, %r12
13285- movd %r12d, %xmm15 # len(A) in %xmm15
13286+ mov arg8, %r15 # %r15 = addLen (number of bytes)
13287+ shl $3, %r15
13288+ movd %r15d, %xmm15 # len(A) in %xmm15
13289 shl $3, %arg4 # len(C) in bits (*128)
13290 MOVQ_R64_XMM %arg4, %xmm1
13291 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13292@@ -1715,7 +1717,8 @@ _return_T_done_encrypt:
13293 mov %r14, %rsp
13294 pop %r14
13295 pop %r13
13296- pop %r12
13297+ pop %r15
13298+ pax_force_retaddr
13299 ret
13300 ENDPROC(aesni_gcm_enc)
13301
13302@@ -1733,6 +1736,7 @@ _key_expansion_256a:
13303 pxor %xmm1, %xmm0
13304 movaps %xmm0, (TKEYP)
13305 add $0x10, TKEYP
13306+ pax_force_retaddr
13307 ret
13308 ENDPROC(_key_expansion_128)
13309 ENDPROC(_key_expansion_256a)
13310@@ -1759,6 +1763,7 @@ _key_expansion_192a:
13311 shufps $0b01001110, %xmm2, %xmm1
13312 movaps %xmm1, 0x10(TKEYP)
13313 add $0x20, TKEYP
13314+ pax_force_retaddr
13315 ret
13316 ENDPROC(_key_expansion_192a)
13317
13318@@ -1779,6 +1784,7 @@ _key_expansion_192b:
13319
13320 movaps %xmm0, (TKEYP)
13321 add $0x10, TKEYP
13322+ pax_force_retaddr
13323 ret
13324 ENDPROC(_key_expansion_192b)
13325
13326@@ -1792,6 +1798,7 @@ _key_expansion_256b:
13327 pxor %xmm1, %xmm2
13328 movaps %xmm2, (TKEYP)
13329 add $0x10, TKEYP
13330+ pax_force_retaddr
13331 ret
13332 ENDPROC(_key_expansion_256b)
13333
13334@@ -1905,6 +1912,7 @@ ENTRY(aesni_set_key)
13335 #ifndef __x86_64__
13336 popl KEYP
13337 #endif
13338+ pax_force_retaddr
13339 ret
13340 ENDPROC(aesni_set_key)
13341
13342@@ -1927,6 +1935,7 @@ ENTRY(aesni_enc)
13343 popl KLEN
13344 popl KEYP
13345 #endif
13346+ pax_force_retaddr
13347 ret
13348 ENDPROC(aesni_enc)
13349
13350@@ -1985,6 +1994,7 @@ _aesni_enc1:
13351 AESENC KEY STATE
13352 movaps 0x70(TKEYP), KEY
13353 AESENCLAST KEY STATE
13354+ pax_force_retaddr
13355 ret
13356 ENDPROC(_aesni_enc1)
13357
13358@@ -2094,6 +2104,7 @@ _aesni_enc4:
13359 AESENCLAST KEY STATE2
13360 AESENCLAST KEY STATE3
13361 AESENCLAST KEY STATE4
13362+ pax_force_retaddr
13363 ret
13364 ENDPROC(_aesni_enc4)
13365
13366@@ -2117,6 +2128,7 @@ ENTRY(aesni_dec)
13367 popl KLEN
13368 popl KEYP
13369 #endif
13370+ pax_force_retaddr
13371 ret
13372 ENDPROC(aesni_dec)
13373
13374@@ -2175,6 +2187,7 @@ _aesni_dec1:
13375 AESDEC KEY STATE
13376 movaps 0x70(TKEYP), KEY
13377 AESDECLAST KEY STATE
13378+ pax_force_retaddr
13379 ret
13380 ENDPROC(_aesni_dec1)
13381
13382@@ -2284,6 +2297,7 @@ _aesni_dec4:
13383 AESDECLAST KEY STATE2
13384 AESDECLAST KEY STATE3
13385 AESDECLAST KEY STATE4
13386+ pax_force_retaddr
13387 ret
13388 ENDPROC(_aesni_dec4)
13389
13390@@ -2342,6 +2356,7 @@ ENTRY(aesni_ecb_enc)
13391 popl KEYP
13392 popl LEN
13393 #endif
13394+ pax_force_retaddr
13395 ret
13396 ENDPROC(aesni_ecb_enc)
13397
13398@@ -2401,6 +2416,7 @@ ENTRY(aesni_ecb_dec)
13399 popl KEYP
13400 popl LEN
13401 #endif
13402+ pax_force_retaddr
13403 ret
13404 ENDPROC(aesni_ecb_dec)
13405
13406@@ -2443,6 +2459,7 @@ ENTRY(aesni_cbc_enc)
13407 popl LEN
13408 popl IVP
13409 #endif
13410+ pax_force_retaddr
13411 ret
13412 ENDPROC(aesni_cbc_enc)
13413
13414@@ -2534,6 +2551,7 @@ ENTRY(aesni_cbc_dec)
13415 popl LEN
13416 popl IVP
13417 #endif
13418+ pax_force_retaddr
13419 ret
13420 ENDPROC(aesni_cbc_dec)
13421
13422@@ -2561,6 +2579,7 @@ _aesni_inc_init:
13423 mov $1, TCTR_LOW
13424 MOVQ_R64_XMM TCTR_LOW INC
13425 MOVQ_R64_XMM CTR TCTR_LOW
13426+ pax_force_retaddr
13427 ret
13428 ENDPROC(_aesni_inc_init)
13429
13430@@ -2590,6 +2609,7 @@ _aesni_inc:
13431 .Linc_low:
13432 movaps CTR, IV
13433 PSHUFB_XMM BSWAP_MASK IV
13434+ pax_force_retaddr
13435 ret
13436 ENDPROC(_aesni_inc)
13437
13438@@ -2651,6 +2671,7 @@ ENTRY(aesni_ctr_enc)
13439 .Lctr_enc_ret:
13440 movups IV, (IVP)
13441 .Lctr_enc_just_ret:
13442+ pax_force_retaddr
13443 ret
13444 ENDPROC(aesni_ctr_enc)
13445
13446@@ -2777,6 +2798,7 @@ ENTRY(aesni_xts_crypt8)
13447 pxor INC, STATE4
13448 movdqu STATE4, 0x70(OUTP)
13449
13450+ pax_force_retaddr
13451 ret
13452 ENDPROC(aesni_xts_crypt8)
13453
13454diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13455index 246c670..466e2d6 100644
13456--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
13457+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13458@@ -21,6 +21,7 @@
13459 */
13460
13461 #include <linux/linkage.h>
13462+#include <asm/alternative-asm.h>
13463
13464 .file "blowfish-x86_64-asm.S"
13465 .text
13466@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
13467 jnz .L__enc_xor;
13468
13469 write_block();
13470+ pax_force_retaddr
13471 ret;
13472 .L__enc_xor:
13473 xor_block();
13474+ pax_force_retaddr
13475 ret;
13476 ENDPROC(__blowfish_enc_blk)
13477
13478@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
13479
13480 movq %r11, %rbp;
13481
13482+ pax_force_retaddr
13483 ret;
13484 ENDPROC(blowfish_dec_blk)
13485
13486@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
13487
13488 popq %rbx;
13489 popq %rbp;
13490+ pax_force_retaddr
13491 ret;
13492
13493 .L__enc_xor4:
13494@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
13495
13496 popq %rbx;
13497 popq %rbp;
13498+ pax_force_retaddr
13499 ret;
13500 ENDPROC(__blowfish_enc_blk_4way)
13501
13502@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
13503 popq %rbx;
13504 popq %rbp;
13505
13506+ pax_force_retaddr
13507 ret;
13508 ENDPROC(blowfish_dec_blk_4way)
13509diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13510index ce71f92..1dce7ec 100644
13511--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13512+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13513@@ -16,6 +16,7 @@
13514 */
13515
13516 #include <linux/linkage.h>
13517+#include <asm/alternative-asm.h>
13518
13519 #define CAMELLIA_TABLE_BYTE_LEN 272
13520
13521@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13522 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
13523 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
13524 %rcx, (%r9));
13525+ pax_force_retaddr
13526 ret;
13527 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13528
13529@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13530 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
13531 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
13532 %rax, (%r9));
13533+ pax_force_retaddr
13534 ret;
13535 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13536
13537@@ -780,6 +783,7 @@ __camellia_enc_blk16:
13538 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13539 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
13540
13541+ pax_force_retaddr
13542 ret;
13543
13544 .align 8
13545@@ -865,6 +869,7 @@ __camellia_dec_blk16:
13546 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13547 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
13548
13549+ pax_force_retaddr
13550 ret;
13551
13552 .align 8
13553@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
13554 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13555 %xmm8, %rsi);
13556
13557+ pax_force_retaddr
13558 ret;
13559 ENDPROC(camellia_ecb_enc_16way)
13560
13561@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
13562 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13563 %xmm8, %rsi);
13564
13565+ pax_force_retaddr
13566 ret;
13567 ENDPROC(camellia_ecb_dec_16way)
13568
13569@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
13570 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13571 %xmm8, %rsi);
13572
13573+ pax_force_retaddr
13574 ret;
13575 ENDPROC(camellia_cbc_dec_16way)
13576
13577@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
13578 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13579 %xmm8, %rsi);
13580
13581+ pax_force_retaddr
13582 ret;
13583 ENDPROC(camellia_ctr_16way)
13584
13585@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
13586 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13587 %xmm8, %rsi);
13588
13589+ pax_force_retaddr
13590 ret;
13591 ENDPROC(camellia_xts_crypt_16way)
13592
13593diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13594index 0e0b886..5a3123c 100644
13595--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13596+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13597@@ -11,6 +11,7 @@
13598 */
13599
13600 #include <linux/linkage.h>
13601+#include <asm/alternative-asm.h>
13602
13603 #define CAMELLIA_TABLE_BYTE_LEN 272
13604
13605@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13606 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
13607 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
13608 %rcx, (%r9));
13609+ pax_force_retaddr
13610 ret;
13611 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13612
13613@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13614 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
13615 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
13616 %rax, (%r9));
13617+ pax_force_retaddr
13618 ret;
13619 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13620
13621@@ -820,6 +823,7 @@ __camellia_enc_blk32:
13622 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13623 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
13624
13625+ pax_force_retaddr
13626 ret;
13627
13628 .align 8
13629@@ -905,6 +909,7 @@ __camellia_dec_blk32:
13630 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13631 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
13632
13633+ pax_force_retaddr
13634 ret;
13635
13636 .align 8
13637@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
13638
13639 vzeroupper;
13640
13641+ pax_force_retaddr
13642 ret;
13643 ENDPROC(camellia_ecb_enc_32way)
13644
13645@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
13646
13647 vzeroupper;
13648
13649+ pax_force_retaddr
13650 ret;
13651 ENDPROC(camellia_ecb_dec_32way)
13652
13653@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
13654
13655 vzeroupper;
13656
13657+ pax_force_retaddr
13658 ret;
13659 ENDPROC(camellia_cbc_dec_32way)
13660
13661@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
13662
13663 vzeroupper;
13664
13665+ pax_force_retaddr
13666 ret;
13667 ENDPROC(camellia_ctr_32way)
13668
13669@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
13670
13671 vzeroupper;
13672
13673+ pax_force_retaddr
13674 ret;
13675 ENDPROC(camellia_xts_crypt_32way)
13676
13677diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
13678index 310319c..db3d7b5 100644
13679--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
13680+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
13681@@ -21,6 +21,7 @@
13682 */
13683
13684 #include <linux/linkage.h>
13685+#include <asm/alternative-asm.h>
13686
13687 .file "camellia-x86_64-asm_64.S"
13688 .text
13689@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
13690 enc_outunpack(mov, RT1);
13691
13692 movq RRBP, %rbp;
13693+ pax_force_retaddr
13694 ret;
13695
13696 .L__enc_xor:
13697 enc_outunpack(xor, RT1);
13698
13699 movq RRBP, %rbp;
13700+ pax_force_retaddr
13701 ret;
13702 ENDPROC(__camellia_enc_blk)
13703
13704@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
13705 dec_outunpack();
13706
13707 movq RRBP, %rbp;
13708+ pax_force_retaddr
13709 ret;
13710 ENDPROC(camellia_dec_blk)
13711
13712@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
13713
13714 movq RRBP, %rbp;
13715 popq %rbx;
13716+ pax_force_retaddr
13717 ret;
13718
13719 .L__enc2_xor:
13720@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
13721
13722 movq RRBP, %rbp;
13723 popq %rbx;
13724+ pax_force_retaddr
13725 ret;
13726 ENDPROC(__camellia_enc_blk_2way)
13727
13728@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
13729
13730 movq RRBP, %rbp;
13731 movq RXOR, %rbx;
13732+ pax_force_retaddr
13733 ret;
13734 ENDPROC(camellia_dec_blk_2way)
13735diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13736index c35fd5d..2d8c7db 100644
13737--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13738+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13739@@ -24,6 +24,7 @@
13740 */
13741
13742 #include <linux/linkage.h>
13743+#include <asm/alternative-asm.h>
13744
13745 .file "cast5-avx-x86_64-asm_64.S"
13746
13747@@ -281,6 +282,7 @@ __cast5_enc_blk16:
13748 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13749 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13750
13751+ pax_force_retaddr
13752 ret;
13753 ENDPROC(__cast5_enc_blk16)
13754
13755@@ -352,6 +354,7 @@ __cast5_dec_blk16:
13756 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13757 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13758
13759+ pax_force_retaddr
13760 ret;
13761
13762 .L__skip_dec:
13763@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
13764 vmovdqu RR4, (6*4*4)(%r11);
13765 vmovdqu RL4, (7*4*4)(%r11);
13766
13767+ pax_force_retaddr
13768 ret;
13769 ENDPROC(cast5_ecb_enc_16way)
13770
13771@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
13772 vmovdqu RR4, (6*4*4)(%r11);
13773 vmovdqu RL4, (7*4*4)(%r11);
13774
13775+ pax_force_retaddr
13776 ret;
13777 ENDPROC(cast5_ecb_dec_16way)
13778
13779@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
13780 * %rdx: src
13781 */
13782
13783- pushq %r12;
13784+ pushq %r14;
13785
13786 movq %rsi, %r11;
13787- movq %rdx, %r12;
13788+ movq %rdx, %r14;
13789
13790 vmovdqu (0*16)(%rdx), RL1;
13791 vmovdqu (1*16)(%rdx), RR1;
13792@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
13793 call __cast5_dec_blk16;
13794
13795 /* xor with src */
13796- vmovq (%r12), RX;
13797+ vmovq (%r14), RX;
13798 vpshufd $0x4f, RX, RX;
13799 vpxor RX, RR1, RR1;
13800- vpxor 0*16+8(%r12), RL1, RL1;
13801- vpxor 1*16+8(%r12), RR2, RR2;
13802- vpxor 2*16+8(%r12), RL2, RL2;
13803- vpxor 3*16+8(%r12), RR3, RR3;
13804- vpxor 4*16+8(%r12), RL3, RL3;
13805- vpxor 5*16+8(%r12), RR4, RR4;
13806- vpxor 6*16+8(%r12), RL4, RL4;
13807+ vpxor 0*16+8(%r14), RL1, RL1;
13808+ vpxor 1*16+8(%r14), RR2, RR2;
13809+ vpxor 2*16+8(%r14), RL2, RL2;
13810+ vpxor 3*16+8(%r14), RR3, RR3;
13811+ vpxor 4*16+8(%r14), RL3, RL3;
13812+ vpxor 5*16+8(%r14), RR4, RR4;
13813+ vpxor 6*16+8(%r14), RL4, RL4;
13814
13815 vmovdqu RR1, (0*16)(%r11);
13816 vmovdqu RL1, (1*16)(%r11);
13817@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
13818 vmovdqu RR4, (6*16)(%r11);
13819 vmovdqu RL4, (7*16)(%r11);
13820
13821- popq %r12;
13822+ popq %r14;
13823
13824+ pax_force_retaddr
13825 ret;
13826 ENDPROC(cast5_cbc_dec_16way)
13827
13828@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
13829 * %rcx: iv (big endian, 64bit)
13830 */
13831
13832- pushq %r12;
13833+ pushq %r14;
13834
13835 movq %rsi, %r11;
13836- movq %rdx, %r12;
13837+ movq %rdx, %r14;
13838
13839 vpcmpeqd RTMP, RTMP, RTMP;
13840 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
13841@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
13842 call __cast5_enc_blk16;
13843
13844 /* dst = src ^ iv */
13845- vpxor (0*16)(%r12), RR1, RR1;
13846- vpxor (1*16)(%r12), RL1, RL1;
13847- vpxor (2*16)(%r12), RR2, RR2;
13848- vpxor (3*16)(%r12), RL2, RL2;
13849- vpxor (4*16)(%r12), RR3, RR3;
13850- vpxor (5*16)(%r12), RL3, RL3;
13851- vpxor (6*16)(%r12), RR4, RR4;
13852- vpxor (7*16)(%r12), RL4, RL4;
13853+ vpxor (0*16)(%r14), RR1, RR1;
13854+ vpxor (1*16)(%r14), RL1, RL1;
13855+ vpxor (2*16)(%r14), RR2, RR2;
13856+ vpxor (3*16)(%r14), RL2, RL2;
13857+ vpxor (4*16)(%r14), RR3, RR3;
13858+ vpxor (5*16)(%r14), RL3, RL3;
13859+ vpxor (6*16)(%r14), RR4, RR4;
13860+ vpxor (7*16)(%r14), RL4, RL4;
13861 vmovdqu RR1, (0*16)(%r11);
13862 vmovdqu RL1, (1*16)(%r11);
13863 vmovdqu RR2, (2*16)(%r11);
13864@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
13865 vmovdqu RR4, (6*16)(%r11);
13866 vmovdqu RL4, (7*16)(%r11);
13867
13868- popq %r12;
13869+ popq %r14;
13870
13871+ pax_force_retaddr
13872 ret;
13873 ENDPROC(cast5_ctr_16way)
13874diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13875index e3531f8..e123f35 100644
13876--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13877+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13878@@ -24,6 +24,7 @@
13879 */
13880
13881 #include <linux/linkage.h>
13882+#include <asm/alternative-asm.h>
13883 #include "glue_helper-asm-avx.S"
13884
13885 .file "cast6-avx-x86_64-asm_64.S"
13886@@ -295,6 +296,7 @@ __cast6_enc_blk8:
13887 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13888 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13889
13890+ pax_force_retaddr
13891 ret;
13892 ENDPROC(__cast6_enc_blk8)
13893
13894@@ -340,6 +342,7 @@ __cast6_dec_blk8:
13895 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13896 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13897
13898+ pax_force_retaddr
13899 ret;
13900 ENDPROC(__cast6_dec_blk8)
13901
13902@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
13903
13904 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13905
13906+ pax_force_retaddr
13907 ret;
13908 ENDPROC(cast6_ecb_enc_8way)
13909
13910@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
13911
13912 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13913
13914+ pax_force_retaddr
13915 ret;
13916 ENDPROC(cast6_ecb_dec_8way)
13917
13918@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
13919 * %rdx: src
13920 */
13921
13922- pushq %r12;
13923+ pushq %r14;
13924
13925 movq %rsi, %r11;
13926- movq %rdx, %r12;
13927+ movq %rdx, %r14;
13928
13929 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13930
13931 call __cast6_dec_blk8;
13932
13933- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13934+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13935
13936- popq %r12;
13937+ popq %r14;
13938
13939+ pax_force_retaddr
13940 ret;
13941 ENDPROC(cast6_cbc_dec_8way)
13942
13943@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
13944 * %rcx: iv (little endian, 128bit)
13945 */
13946
13947- pushq %r12;
13948+ pushq %r14;
13949
13950 movq %rsi, %r11;
13951- movq %rdx, %r12;
13952+ movq %rdx, %r14;
13953
13954 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
13955 RD2, RX, RKR, RKM);
13956
13957 call __cast6_enc_blk8;
13958
13959- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13960+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13961
13962- popq %r12;
13963+ popq %r14;
13964
13965+ pax_force_retaddr
13966 ret;
13967 ENDPROC(cast6_ctr_8way)
13968
13969@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
13970 /* dst <= regs xor IVs(in dst) */
13971 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13972
13973+ pax_force_retaddr
13974 ret;
13975 ENDPROC(cast6_xts_enc_8way)
13976
13977@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
13978 /* dst <= regs xor IVs(in dst) */
13979 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13980
13981+ pax_force_retaddr
13982 ret;
13983 ENDPROC(cast6_xts_dec_8way)
13984diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13985index 26d49eb..8bf39c8 100644
13986--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13987+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13988@@ -45,6 +45,7 @@
13989
13990 #include <asm/inst.h>
13991 #include <linux/linkage.h>
13992+#include <asm/alternative-asm.h>
13993
13994 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
13995
13996@@ -309,6 +310,7 @@ do_return:
13997 popq %rsi
13998 popq %rdi
13999 popq %rbx
14000+ pax_force_retaddr
14001 ret
14002
14003 ################################################################
14004@@ -330,7 +332,7 @@ ENDPROC(crc_pcl)
14005 ## PCLMULQDQ tables
14006 ## Table is 128 entries x 2 words (8 bytes) each
14007 ################################################################
14008-.section .rotata, "a", %progbits
14009+.section .rodata, "a", %progbits
14010 .align 8
14011 K_table:
14012 .long 0x493c7d27, 0x00000001
14013diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14014index 5d1e007..098cb4f 100644
14015--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
14016+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14017@@ -18,6 +18,7 @@
14018
14019 #include <linux/linkage.h>
14020 #include <asm/inst.h>
14021+#include <asm/alternative-asm.h>
14022
14023 .data
14024
14025@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14026 psrlq $1, T2
14027 pxor T2, T1
14028 pxor T1, DATA
14029+ pax_force_retaddr
14030 ret
14031 ENDPROC(__clmul_gf128mul_ble)
14032
14033@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14034 call __clmul_gf128mul_ble
14035 PSHUFB_XMM BSWAP DATA
14036 movups DATA, (%rdi)
14037+ pax_force_retaddr
14038 ret
14039 ENDPROC(clmul_ghash_mul)
14040
14041@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14042 PSHUFB_XMM BSWAP DATA
14043 movups DATA, (%rdi)
14044 .Lupdate_just_ret:
14045+ pax_force_retaddr
14046 ret
14047 ENDPROC(clmul_ghash_update)
14048diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14049index 9279e0b..c4b3d2c 100644
14050--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14051+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14052@@ -1,4 +1,5 @@
14053 #include <linux/linkage.h>
14054+#include <asm/alternative-asm.h>
14055
14056 # enter salsa20_encrypt_bytes
14057 ENTRY(salsa20_encrypt_bytes)
14058@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14059 add %r11,%rsp
14060 mov %rdi,%rax
14061 mov %rsi,%rdx
14062+ pax_force_retaddr
14063 ret
14064 # bytesatleast65:
14065 ._bytesatleast65:
14066@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14067 add %r11,%rsp
14068 mov %rdi,%rax
14069 mov %rsi,%rdx
14070+ pax_force_retaddr
14071 ret
14072 ENDPROC(salsa20_keysetup)
14073
14074@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14075 add %r11,%rsp
14076 mov %rdi,%rax
14077 mov %rsi,%rdx
14078+ pax_force_retaddr
14079 ret
14080 ENDPROC(salsa20_ivsetup)
14081diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14082index 2f202f4..d9164d6 100644
14083--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14084+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14085@@ -24,6 +24,7 @@
14086 */
14087
14088 #include <linux/linkage.h>
14089+#include <asm/alternative-asm.h>
14090 #include "glue_helper-asm-avx.S"
14091
14092 .file "serpent-avx-x86_64-asm_64.S"
14093@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14094 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14095 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14096
14097+ pax_force_retaddr
14098 ret;
14099 ENDPROC(__serpent_enc_blk8_avx)
14100
14101@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14102 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14103 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14104
14105+ pax_force_retaddr
14106 ret;
14107 ENDPROC(__serpent_dec_blk8_avx)
14108
14109@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14110
14111 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14112
14113+ pax_force_retaddr
14114 ret;
14115 ENDPROC(serpent_ecb_enc_8way_avx)
14116
14117@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14118
14119 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14120
14121+ pax_force_retaddr
14122 ret;
14123 ENDPROC(serpent_ecb_dec_8way_avx)
14124
14125@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14126
14127 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14128
14129+ pax_force_retaddr
14130 ret;
14131 ENDPROC(serpent_cbc_dec_8way_avx)
14132
14133@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14134
14135 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14136
14137+ pax_force_retaddr
14138 ret;
14139 ENDPROC(serpent_ctr_8way_avx)
14140
14141@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14142 /* dst <= regs xor IVs(in dst) */
14143 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14144
14145+ pax_force_retaddr
14146 ret;
14147 ENDPROC(serpent_xts_enc_8way_avx)
14148
14149@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14150 /* dst <= regs xor IVs(in dst) */
14151 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14152
14153+ pax_force_retaddr
14154 ret;
14155 ENDPROC(serpent_xts_dec_8way_avx)
14156diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14157index b222085..abd483c 100644
14158--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14159+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14160@@ -15,6 +15,7 @@
14161 */
14162
14163 #include <linux/linkage.h>
14164+#include <asm/alternative-asm.h>
14165 #include "glue_helper-asm-avx2.S"
14166
14167 .file "serpent-avx2-asm_64.S"
14168@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14169 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14170 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14171
14172+ pax_force_retaddr
14173 ret;
14174 ENDPROC(__serpent_enc_blk16)
14175
14176@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14177 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14178 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14179
14180+ pax_force_retaddr
14181 ret;
14182 ENDPROC(__serpent_dec_blk16)
14183
14184@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14185
14186 vzeroupper;
14187
14188+ pax_force_retaddr
14189 ret;
14190 ENDPROC(serpent_ecb_enc_16way)
14191
14192@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14193
14194 vzeroupper;
14195
14196+ pax_force_retaddr
14197 ret;
14198 ENDPROC(serpent_ecb_dec_16way)
14199
14200@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14201
14202 vzeroupper;
14203
14204+ pax_force_retaddr
14205 ret;
14206 ENDPROC(serpent_cbc_dec_16way)
14207
14208@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14209
14210 vzeroupper;
14211
14212+ pax_force_retaddr
14213 ret;
14214 ENDPROC(serpent_ctr_16way)
14215
14216@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14217
14218 vzeroupper;
14219
14220+ pax_force_retaddr
14221 ret;
14222 ENDPROC(serpent_xts_enc_16way)
14223
14224@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14225
14226 vzeroupper;
14227
14228+ pax_force_retaddr
14229 ret;
14230 ENDPROC(serpent_xts_dec_16way)
14231diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14232index acc066c..1559cc4 100644
14233--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14234+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14235@@ -25,6 +25,7 @@
14236 */
14237
14238 #include <linux/linkage.h>
14239+#include <asm/alternative-asm.h>
14240
14241 .file "serpent-sse2-x86_64-asm_64.S"
14242 .text
14243@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14244 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14245 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14246
14247+ pax_force_retaddr
14248 ret;
14249
14250 .L__enc_xor8:
14251 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14252 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14253
14254+ pax_force_retaddr
14255 ret;
14256 ENDPROC(__serpent_enc_blk_8way)
14257
14258@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14259 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14260 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14261
14262+ pax_force_retaddr
14263 ret;
14264 ENDPROC(serpent_dec_blk_8way)
14265diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14266index a410950..9dfe7ad 100644
14267--- a/arch/x86/crypto/sha1_ssse3_asm.S
14268+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14269@@ -29,6 +29,7 @@
14270 */
14271
14272 #include <linux/linkage.h>
14273+#include <asm/alternative-asm.h>
14274
14275 #define CTX %rdi // arg1
14276 #define BUF %rsi // arg2
14277@@ -75,9 +76,9 @@
14278
14279 push %rbx
14280 push %rbp
14281- push %r12
14282+ push %r14
14283
14284- mov %rsp, %r12
14285+ mov %rsp, %r14
14286 sub $64, %rsp # allocate workspace
14287 and $~15, %rsp # align stack
14288
14289@@ -99,11 +100,12 @@
14290 xor %rax, %rax
14291 rep stosq
14292
14293- mov %r12, %rsp # deallocate workspace
14294+ mov %r14, %rsp # deallocate workspace
14295
14296- pop %r12
14297+ pop %r14
14298 pop %rbp
14299 pop %rbx
14300+ pax_force_retaddr
14301 ret
14302
14303 ENDPROC(\name)
14304diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14305index 642f156..51a513c 100644
14306--- a/arch/x86/crypto/sha256-avx-asm.S
14307+++ b/arch/x86/crypto/sha256-avx-asm.S
14308@@ -49,6 +49,7 @@
14309
14310 #ifdef CONFIG_AS_AVX
14311 #include <linux/linkage.h>
14312+#include <asm/alternative-asm.h>
14313
14314 ## assume buffers not aligned
14315 #define VMOVDQ vmovdqu
14316@@ -460,6 +461,7 @@ done_hash:
14317 popq %r13
14318 popq %rbp
14319 popq %rbx
14320+ pax_force_retaddr
14321 ret
14322 ENDPROC(sha256_transform_avx)
14323
14324diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
14325index 9e86944..3795e6a 100644
14326--- a/arch/x86/crypto/sha256-avx2-asm.S
14327+++ b/arch/x86/crypto/sha256-avx2-asm.S
14328@@ -50,6 +50,7 @@
14329
14330 #ifdef CONFIG_AS_AVX2
14331 #include <linux/linkage.h>
14332+#include <asm/alternative-asm.h>
14333
14334 ## assume buffers not aligned
14335 #define VMOVDQ vmovdqu
14336@@ -720,6 +721,7 @@ done_hash:
14337 popq %r12
14338 popq %rbp
14339 popq %rbx
14340+ pax_force_retaddr
14341 ret
14342 ENDPROC(sha256_transform_rorx)
14343
14344diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
14345index f833b74..8c62a9e 100644
14346--- a/arch/x86/crypto/sha256-ssse3-asm.S
14347+++ b/arch/x86/crypto/sha256-ssse3-asm.S
14348@@ -47,6 +47,7 @@
14349 ########################################################################
14350
14351 #include <linux/linkage.h>
14352+#include <asm/alternative-asm.h>
14353
14354 ## assume buffers not aligned
14355 #define MOVDQ movdqu
14356@@ -471,6 +472,7 @@ done_hash:
14357 popq %rbp
14358 popq %rbx
14359
14360+ pax_force_retaddr
14361 ret
14362 ENDPROC(sha256_transform_ssse3)
14363
14364diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
14365index 974dde9..a823ff9 100644
14366--- a/arch/x86/crypto/sha512-avx-asm.S
14367+++ b/arch/x86/crypto/sha512-avx-asm.S
14368@@ -49,6 +49,7 @@
14369
14370 #ifdef CONFIG_AS_AVX
14371 #include <linux/linkage.h>
14372+#include <asm/alternative-asm.h>
14373
14374 .text
14375
14376@@ -364,6 +365,7 @@ updateblock:
14377 mov frame_RSPSAVE(%rsp), %rsp
14378
14379 nowork:
14380+ pax_force_retaddr
14381 ret
14382 ENDPROC(sha512_transform_avx)
14383
14384diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
14385index 568b961..ed20c37 100644
14386--- a/arch/x86/crypto/sha512-avx2-asm.S
14387+++ b/arch/x86/crypto/sha512-avx2-asm.S
14388@@ -51,6 +51,7 @@
14389
14390 #ifdef CONFIG_AS_AVX2
14391 #include <linux/linkage.h>
14392+#include <asm/alternative-asm.h>
14393
14394 .text
14395
14396@@ -678,6 +679,7 @@ done_hash:
14397
14398 # Restore Stack Pointer
14399 mov frame_RSPSAVE(%rsp), %rsp
14400+ pax_force_retaddr
14401 ret
14402 ENDPROC(sha512_transform_rorx)
14403
14404diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
14405index fb56855..6edd768 100644
14406--- a/arch/x86/crypto/sha512-ssse3-asm.S
14407+++ b/arch/x86/crypto/sha512-ssse3-asm.S
14408@@ -48,6 +48,7 @@
14409 ########################################################################
14410
14411 #include <linux/linkage.h>
14412+#include <asm/alternative-asm.h>
14413
14414 .text
14415
14416@@ -363,6 +364,7 @@ updateblock:
14417 mov frame_RSPSAVE(%rsp), %rsp
14418
14419 nowork:
14420+ pax_force_retaddr
14421 ret
14422 ENDPROC(sha512_transform_ssse3)
14423
14424diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14425index 0505813..b067311 100644
14426--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14427+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14428@@ -24,6 +24,7 @@
14429 */
14430
14431 #include <linux/linkage.h>
14432+#include <asm/alternative-asm.h>
14433 #include "glue_helper-asm-avx.S"
14434
14435 .file "twofish-avx-x86_64-asm_64.S"
14436@@ -284,6 +285,7 @@ __twofish_enc_blk8:
14437 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
14438 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
14439
14440+ pax_force_retaddr
14441 ret;
14442 ENDPROC(__twofish_enc_blk8)
14443
14444@@ -324,6 +326,7 @@ __twofish_dec_blk8:
14445 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
14446 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
14447
14448+ pax_force_retaddr
14449 ret;
14450 ENDPROC(__twofish_dec_blk8)
14451
14452@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
14453
14454 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14455
14456+ pax_force_retaddr
14457 ret;
14458 ENDPROC(twofish_ecb_enc_8way)
14459
14460@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
14461
14462 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14463
14464+ pax_force_retaddr
14465 ret;
14466 ENDPROC(twofish_ecb_dec_8way)
14467
14468@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
14469 * %rdx: src
14470 */
14471
14472- pushq %r12;
14473+ pushq %r14;
14474
14475 movq %rsi, %r11;
14476- movq %rdx, %r12;
14477+ movq %rdx, %r14;
14478
14479 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14480
14481 call __twofish_dec_blk8;
14482
14483- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14484+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14485
14486- popq %r12;
14487+ popq %r14;
14488
14489+ pax_force_retaddr
14490 ret;
14491 ENDPROC(twofish_cbc_dec_8way)
14492
14493@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
14494 * %rcx: iv (little endian, 128bit)
14495 */
14496
14497- pushq %r12;
14498+ pushq %r14;
14499
14500 movq %rsi, %r11;
14501- movq %rdx, %r12;
14502+ movq %rdx, %r14;
14503
14504 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14505 RD2, RX0, RX1, RY0);
14506
14507 call __twofish_enc_blk8;
14508
14509- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14510+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14511
14512- popq %r12;
14513+ popq %r14;
14514
14515+ pax_force_retaddr
14516 ret;
14517 ENDPROC(twofish_ctr_8way)
14518
14519@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
14520 /* dst <= regs xor IVs(in dst) */
14521 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14522
14523+ pax_force_retaddr
14524 ret;
14525 ENDPROC(twofish_xts_enc_8way)
14526
14527@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
14528 /* dst <= regs xor IVs(in dst) */
14529 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14530
14531+ pax_force_retaddr
14532 ret;
14533 ENDPROC(twofish_xts_dec_8way)
14534diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14535index 1c3b7ce..02f578d 100644
14536--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14537+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14538@@ -21,6 +21,7 @@
14539 */
14540
14541 #include <linux/linkage.h>
14542+#include <asm/alternative-asm.h>
14543
14544 .file "twofish-x86_64-asm-3way.S"
14545 .text
14546@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
14547 popq %r13;
14548 popq %r14;
14549 popq %r15;
14550+ pax_force_retaddr
14551 ret;
14552
14553 .L__enc_xor3:
14554@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
14555 popq %r13;
14556 popq %r14;
14557 popq %r15;
14558+ pax_force_retaddr
14559 ret;
14560 ENDPROC(__twofish_enc_blk_3way)
14561
14562@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
14563 popq %r13;
14564 popq %r14;
14565 popq %r15;
14566+ pax_force_retaddr
14567 ret;
14568 ENDPROC(twofish_dec_blk_3way)
14569diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
14570index a039d21..524b8b2 100644
14571--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
14572+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
14573@@ -22,6 +22,7 @@
14574
14575 #include <linux/linkage.h>
14576 #include <asm/asm-offsets.h>
14577+#include <asm/alternative-asm.h>
14578
14579 #define a_offset 0
14580 #define b_offset 4
14581@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
14582
14583 popq R1
14584 movq $1,%rax
14585+ pax_force_retaddr
14586 ret
14587 ENDPROC(twofish_enc_blk)
14588
14589@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
14590
14591 popq R1
14592 movq $1,%rax
14593+ pax_force_retaddr
14594 ret
14595 ENDPROC(twofish_dec_blk)
14596diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
14597index ae6aad1..719d6d9 100644
14598--- a/arch/x86/ia32/ia32_aout.c
14599+++ b/arch/x86/ia32/ia32_aout.c
14600@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
14601 unsigned long dump_start, dump_size;
14602 struct user32 dump;
14603
14604+ memset(&dump, 0, sizeof(dump));
14605+
14606 fs = get_fs();
14607 set_fs(KERNEL_DS);
14608 has_dumped = 1;
14609diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
14610index d0165c9..0d5639b 100644
14611--- a/arch/x86/ia32/ia32_signal.c
14612+++ b/arch/x86/ia32/ia32_signal.c
14613@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
14614 if (__get_user(set.sig[0], &frame->sc.oldmask)
14615 || (_COMPAT_NSIG_WORDS > 1
14616 && __copy_from_user((((char *) &set.sig) + 4),
14617- &frame->extramask,
14618+ frame->extramask,
14619 sizeof(frame->extramask))))
14620 goto badframe;
14621
14622@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
14623 sp -= frame_size;
14624 /* Align the stack pointer according to the i386 ABI,
14625 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
14626- sp = ((sp + 4) & -16ul) - 4;
14627+ sp = ((sp - 12) & -16ul) - 4;
14628 return (void __user *) sp;
14629 }
14630
14631@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14632 } else {
14633 /* Return stub is in 32bit vsyscall page */
14634 if (current->mm->context.vdso)
14635- restorer = current->mm->context.vdso +
14636- selected_vdso32->sym___kernel_sigreturn;
14637+ restorer = (void __force_user *)(current->mm->context.vdso +
14638+ selected_vdso32->sym___kernel_sigreturn);
14639 else
14640- restorer = &frame->retcode;
14641+ restorer = frame->retcode;
14642 }
14643
14644 put_user_try {
14645@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14646 * These are actually not used anymore, but left because some
14647 * gdb versions depend on them as a marker.
14648 */
14649- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14650+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14651 } put_user_catch(err);
14652
14653 if (err)
14654@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14655 0xb8,
14656 __NR_ia32_rt_sigreturn,
14657 0x80cd,
14658- 0,
14659+ 0
14660 };
14661
14662 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
14663@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14664
14665 if (ksig->ka.sa.sa_flags & SA_RESTORER)
14666 restorer = ksig->ka.sa.sa_restorer;
14667+ else if (current->mm->context.vdso)
14668+ /* Return stub is in 32bit vsyscall page */
14669+ restorer = (void __force_user *)(current->mm->context.vdso +
14670+ selected_vdso32->sym___kernel_rt_sigreturn);
14671 else
14672- restorer = current->mm->context.vdso +
14673- selected_vdso32->sym___kernel_rt_sigreturn;
14674+ restorer = frame->retcode;
14675 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
14676
14677 /*
14678 * Not actually used anymore, but left because some gdb
14679 * versions need it.
14680 */
14681- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14682+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14683 } put_user_catch(err);
14684
14685 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
14686diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
14687index 156ebca..9591cf0 100644
14688--- a/arch/x86/ia32/ia32entry.S
14689+++ b/arch/x86/ia32/ia32entry.S
14690@@ -15,8 +15,10 @@
14691 #include <asm/irqflags.h>
14692 #include <asm/asm.h>
14693 #include <asm/smap.h>
14694+#include <asm/pgtable.h>
14695 #include <linux/linkage.h>
14696 #include <linux/err.h>
14697+#include <asm/alternative-asm.h>
14698
14699 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14700 #include <linux/elf-em.h>
14701@@ -62,12 +64,12 @@
14702 */
14703 .macro LOAD_ARGS32 offset, _r9=0
14704 .if \_r9
14705- movl \offset+16(%rsp),%r9d
14706+ movl \offset+R9(%rsp),%r9d
14707 .endif
14708- movl \offset+40(%rsp),%ecx
14709- movl \offset+48(%rsp),%edx
14710- movl \offset+56(%rsp),%esi
14711- movl \offset+64(%rsp),%edi
14712+ movl \offset+RCX(%rsp),%ecx
14713+ movl \offset+RDX(%rsp),%edx
14714+ movl \offset+RSI(%rsp),%esi
14715+ movl \offset+RDI(%rsp),%edi
14716 movl %eax,%eax /* zero extension */
14717 .endm
14718
14719@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
14720 ENDPROC(native_irq_enable_sysexit)
14721 #endif
14722
14723+ .macro pax_enter_kernel_user
14724+ pax_set_fptr_mask
14725+#ifdef CONFIG_PAX_MEMORY_UDEREF
14726+ call pax_enter_kernel_user
14727+#endif
14728+ .endm
14729+
14730+ .macro pax_exit_kernel_user
14731+#ifdef CONFIG_PAX_MEMORY_UDEREF
14732+ call pax_exit_kernel_user
14733+#endif
14734+#ifdef CONFIG_PAX_RANDKSTACK
14735+ pushq %rax
14736+ pushq %r11
14737+ call pax_randomize_kstack
14738+ popq %r11
14739+ popq %rax
14740+#endif
14741+ .endm
14742+
14743+ .macro pax_erase_kstack
14744+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14745+ call pax_erase_kstack
14746+#endif
14747+ .endm
14748+
14749 /*
14750 * 32bit SYSENTER instruction entry.
14751 *
14752@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
14753 CFI_REGISTER rsp,rbp
14754 SWAPGS_UNSAFE_STACK
14755 movq PER_CPU_VAR(kernel_stack), %rsp
14756- addq $(KERNEL_STACK_OFFSET),%rsp
14757- /*
14758- * No need to follow this irqs on/off section: the syscall
14759- * disabled irqs, here we enable it straight after entry:
14760- */
14761- ENABLE_INTERRUPTS(CLBR_NONE)
14762 movl %ebp,%ebp /* zero extension */
14763 pushq_cfi $__USER32_DS
14764 /*CFI_REL_OFFSET ss,0*/
14765@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target)
14766 CFI_REL_OFFSET rsp,0
14767 pushfq_cfi
14768 /*CFI_REL_OFFSET rflags,0*/
14769- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
14770- CFI_REGISTER rip,r10
14771+ orl $X86_EFLAGS_IF,(%rsp)
14772+ GET_THREAD_INFO(%r11)
14773+ movl TI_sysenter_return(%r11), %r11d
14774+ CFI_REGISTER rip,r11
14775 pushq_cfi $__USER32_CS
14776 /*CFI_REL_OFFSET cs,0*/
14777 movl %eax, %eax
14778- pushq_cfi %r10
14779+ pushq_cfi %r11
14780 CFI_REL_OFFSET rip,0
14781 pushq_cfi %rax
14782 cld
14783 SAVE_ARGS 0,1,0
14784+ pax_enter_kernel_user
14785+
14786+#ifdef CONFIG_PAX_RANDKSTACK
14787+ pax_erase_kstack
14788+#endif
14789+
14790+ /*
14791+ * No need to follow this irqs on/off section: the syscall
14792+ * disabled irqs, here we enable it straight after entry:
14793+ */
14794+ ENABLE_INTERRUPTS(CLBR_NONE)
14795 /* no need to do an access_ok check here because rbp has been
14796 32bit zero extended */
14797+
14798+#ifdef CONFIG_PAX_MEMORY_UDEREF
14799+ addq pax_user_shadow_base,%rbp
14800+ ASM_PAX_OPEN_USERLAND
14801+#endif
14802+
14803 ASM_STAC
14804 1: movl (%rbp),%ebp
14805 _ASM_EXTABLE(1b,ia32_badarg)
14806 ASM_CLAC
14807
14808+#ifdef CONFIG_PAX_MEMORY_UDEREF
14809+ ASM_PAX_CLOSE_USERLAND
14810+#endif
14811+
14812 /*
14813 * Sysenter doesn't filter flags, so we need to clear NT
14814 * ourselves. To save a few cycles, we can check whether
14815@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target)
14816 jnz sysenter_fix_flags
14817 sysenter_flags_fixed:
14818
14819- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14820- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14821+ GET_THREAD_INFO(%r11)
14822+ orl $TS_COMPAT,TI_status(%r11)
14823+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14824 CFI_REMEMBER_STATE
14825 jnz sysenter_tracesys
14826 cmpq $(IA32_NR_syscalls-1),%rax
14827@@ -172,14 +218,17 @@ sysenter_do_call:
14828 sysenter_dispatch:
14829 call *ia32_sys_call_table(,%rax,8)
14830 movq %rax,RAX-ARGOFFSET(%rsp)
14831+ GET_THREAD_INFO(%r11)
14832 DISABLE_INTERRUPTS(CLBR_NONE)
14833 TRACE_IRQS_OFF
14834- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14835+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14836 jnz sysexit_audit
14837 sysexit_from_sys_call:
14838- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14839+ pax_exit_kernel_user
14840+ pax_erase_kstack
14841+ andl $~TS_COMPAT,TI_status(%r11)
14842 /* clear IF, that popfq doesn't enable interrupts early */
14843- andl $~0x200,EFLAGS-ARGOFFSET(%rsp)
14844+ andl $~X86_EFLAGS_IF,EFLAGS-ARGOFFSET(%rsp)
14845 movl RIP-ARGOFFSET(%rsp),%edx /* User %eip */
14846 CFI_REGISTER rip,rdx
14847 RESTORE_ARGS 0,24,0,0,0,0
14848@@ -205,6 +254,9 @@ sysexit_from_sys_call:
14849 movl %ebx,%esi /* 2nd arg: 1st syscall arg */
14850 movl %eax,%edi /* 1st arg: syscall number */
14851 call __audit_syscall_entry
14852+
14853+ pax_erase_kstack
14854+
14855 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
14856 cmpq $(IA32_NR_syscalls-1),%rax
14857 ja ia32_badsys
14858@@ -216,7 +268,7 @@ sysexit_from_sys_call:
14859 .endm
14860
14861 .macro auditsys_exit exit
14862- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14863+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14864 jnz ia32_ret_from_sys_call
14865 TRACE_IRQS_ON
14866 ENABLE_INTERRUPTS(CLBR_NONE)
14867@@ -227,11 +279,12 @@ sysexit_from_sys_call:
14868 1: setbe %al /* 1 if error, 0 if not */
14869 movzbl %al,%edi /* zero-extend that into %edi */
14870 call __audit_syscall_exit
14871+ GET_THREAD_INFO(%r11)
14872 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
14873 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
14874 DISABLE_INTERRUPTS(CLBR_NONE)
14875 TRACE_IRQS_OFF
14876- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14877+ testl %edi,TI_flags(%r11)
14878 jz \exit
14879 CLEAR_RREGS -ARGOFFSET
14880 jmp int_with_check
14881@@ -253,7 +306,7 @@ sysenter_fix_flags:
14882
14883 sysenter_tracesys:
14884 #ifdef CONFIG_AUDITSYSCALL
14885- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14886+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14887 jz sysenter_auditsys
14888 #endif
14889 SAVE_REST
14890@@ -265,6 +318,9 @@ sysenter_tracesys:
14891 RESTORE_REST
14892 cmpq $(IA32_NR_syscalls-1),%rax
14893 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
14894+
14895+ pax_erase_kstack
14896+
14897 jmp sysenter_do_call
14898 CFI_ENDPROC
14899 ENDPROC(ia32_sysenter_target)
14900@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target)
14901 ENTRY(ia32_cstar_target)
14902 CFI_STARTPROC32 simple
14903 CFI_SIGNAL_FRAME
14904- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14905+ CFI_DEF_CFA rsp,0
14906 CFI_REGISTER rip,rcx
14907 /*CFI_REGISTER rflags,r11*/
14908 SWAPGS_UNSAFE_STACK
14909 movl %esp,%r8d
14910 CFI_REGISTER rsp,r8
14911 movq PER_CPU_VAR(kernel_stack),%rsp
14912+ SAVE_ARGS 8*6,0,0
14913+ pax_enter_kernel_user
14914+
14915+#ifdef CONFIG_PAX_RANDKSTACK
14916+ pax_erase_kstack
14917+#endif
14918+
14919 /*
14920 * No need to follow this irqs on/off section: the syscall
14921 * disabled irqs and here we enable it straight after entry:
14922 */
14923 ENABLE_INTERRUPTS(CLBR_NONE)
14924- SAVE_ARGS 8,0,0
14925 movl %eax,%eax /* zero extension */
14926 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14927 movq %rcx,RIP-ARGOFFSET(%rsp)
14928@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target)
14929 /* no need to do an access_ok check here because r8 has been
14930 32bit zero extended */
14931 /* hardware stack frame is complete now */
14932+
14933+#ifdef CONFIG_PAX_MEMORY_UDEREF
14934+ ASM_PAX_OPEN_USERLAND
14935+ movq pax_user_shadow_base,%r8
14936+ addq RSP-ARGOFFSET(%rsp),%r8
14937+#endif
14938+
14939 ASM_STAC
14940 1: movl (%r8),%r9d
14941 _ASM_EXTABLE(1b,ia32_badarg)
14942 ASM_CLAC
14943- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14944- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14945+
14946+#ifdef CONFIG_PAX_MEMORY_UDEREF
14947+ ASM_PAX_CLOSE_USERLAND
14948+#endif
14949+
14950+ GET_THREAD_INFO(%r11)
14951+ orl $TS_COMPAT,TI_status(%r11)
14952+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14953 CFI_REMEMBER_STATE
14954 jnz cstar_tracesys
14955 cmpq $IA32_NR_syscalls-1,%rax
14956@@ -335,13 +410,16 @@ cstar_do_call:
14957 cstar_dispatch:
14958 call *ia32_sys_call_table(,%rax,8)
14959 movq %rax,RAX-ARGOFFSET(%rsp)
14960+ GET_THREAD_INFO(%r11)
14961 DISABLE_INTERRUPTS(CLBR_NONE)
14962 TRACE_IRQS_OFF
14963- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14964+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14965 jnz sysretl_audit
14966 sysretl_from_sys_call:
14967- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14968- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
14969+ pax_exit_kernel_user
14970+ pax_erase_kstack
14971+ andl $~TS_COMPAT,TI_status(%r11)
14972+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
14973 movl RIP-ARGOFFSET(%rsp),%ecx
14974 CFI_REGISTER rip,rcx
14975 movl EFLAGS-ARGOFFSET(%rsp),%r11d
14976@@ -368,7 +446,7 @@ sysretl_audit:
14977
14978 cstar_tracesys:
14979 #ifdef CONFIG_AUDITSYSCALL
14980- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14981+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14982 jz cstar_auditsys
14983 #endif
14984 xchgl %r9d,%ebp
14985@@ -382,11 +460,19 @@ cstar_tracesys:
14986 xchgl %ebp,%r9d
14987 cmpq $(IA32_NR_syscalls-1),%rax
14988 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
14989+
14990+ pax_erase_kstack
14991+
14992 jmp cstar_do_call
14993 END(ia32_cstar_target)
14994
14995 ia32_badarg:
14996 ASM_CLAC
14997+
14998+#ifdef CONFIG_PAX_MEMORY_UDEREF
14999+ ASM_PAX_CLOSE_USERLAND
15000+#endif
15001+
15002 movq $-EFAULT,%rax
15003 jmp ia32_sysret
15004 CFI_ENDPROC
15005@@ -423,19 +509,26 @@ ENTRY(ia32_syscall)
15006 CFI_REL_OFFSET rip,RIP-RIP
15007 PARAVIRT_ADJUST_EXCEPTION_FRAME
15008 SWAPGS
15009- /*
15010- * No need to follow this irqs on/off section: the syscall
15011- * disabled irqs and here we enable it straight after entry:
15012- */
15013- ENABLE_INTERRUPTS(CLBR_NONE)
15014 movl %eax,%eax
15015 pushq_cfi %rax
15016 cld
15017 /* note the registers are not zero extended to the sf.
15018 this could be a problem. */
15019 SAVE_ARGS 0,1,0
15020- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15021- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15022+ pax_enter_kernel_user
15023+
15024+#ifdef CONFIG_PAX_RANDKSTACK
15025+ pax_erase_kstack
15026+#endif
15027+
15028+ /*
15029+ * No need to follow this irqs on/off section: the syscall
15030+ * disabled irqs and here we enable it straight after entry:
15031+ */
15032+ ENABLE_INTERRUPTS(CLBR_NONE)
15033+ GET_THREAD_INFO(%r11)
15034+ orl $TS_COMPAT,TI_status(%r11)
15035+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15036 jnz ia32_tracesys
15037 cmpq $(IA32_NR_syscalls-1),%rax
15038 ja ia32_badsys
15039@@ -458,6 +551,9 @@ ia32_tracesys:
15040 RESTORE_REST
15041 cmpq $(IA32_NR_syscalls-1),%rax
15042 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15043+
15044+ pax_erase_kstack
15045+
15046 jmp ia32_do_call
15047 END(ia32_syscall)
15048
15049diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15050index 8e0ceec..af13504 100644
15051--- a/arch/x86/ia32/sys_ia32.c
15052+++ b/arch/x86/ia32/sys_ia32.c
15053@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15054 */
15055 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15056 {
15057- typeof(ubuf->st_uid) uid = 0;
15058- typeof(ubuf->st_gid) gid = 0;
15059+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15060+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15061 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15062 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15063 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
15064diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15065index 372231c..51b537d 100644
15066--- a/arch/x86/include/asm/alternative-asm.h
15067+++ b/arch/x86/include/asm/alternative-asm.h
15068@@ -18,6 +18,45 @@
15069 .endm
15070 #endif
15071
15072+#ifdef KERNEXEC_PLUGIN
15073+ .macro pax_force_retaddr_bts rip=0
15074+ btsq $63,\rip(%rsp)
15075+ .endm
15076+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15077+ .macro pax_force_retaddr rip=0, reload=0
15078+ btsq $63,\rip(%rsp)
15079+ .endm
15080+ .macro pax_force_fptr ptr
15081+ btsq $63,\ptr
15082+ .endm
15083+ .macro pax_set_fptr_mask
15084+ .endm
15085+#endif
15086+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15087+ .macro pax_force_retaddr rip=0, reload=0
15088+ .if \reload
15089+ pax_set_fptr_mask
15090+ .endif
15091+ orq %r12,\rip(%rsp)
15092+ .endm
15093+ .macro pax_force_fptr ptr
15094+ orq %r12,\ptr
15095+ .endm
15096+ .macro pax_set_fptr_mask
15097+ movabs $0x8000000000000000,%r12
15098+ .endm
15099+#endif
15100+#else
15101+ .macro pax_force_retaddr rip=0, reload=0
15102+ .endm
15103+ .macro pax_force_fptr ptr
15104+ .endm
15105+ .macro pax_force_retaddr_bts rip=0
15106+ .endm
15107+ .macro pax_set_fptr_mask
15108+ .endm
15109+#endif
15110+
15111 .macro altinstruction_entry orig alt feature orig_len alt_len
15112 .long \orig - .
15113 .long \alt - .
15114diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15115index 473bdbe..b1e3377 100644
15116--- a/arch/x86/include/asm/alternative.h
15117+++ b/arch/x86/include/asm/alternative.h
15118@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15119 ".pushsection .discard,\"aw\",@progbits\n" \
15120 DISCARD_ENTRY(1) \
15121 ".popsection\n" \
15122- ".pushsection .altinstr_replacement, \"ax\"\n" \
15123+ ".pushsection .altinstr_replacement, \"a\"\n" \
15124 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15125 ".popsection"
15126
15127@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15128 DISCARD_ENTRY(1) \
15129 DISCARD_ENTRY(2) \
15130 ".popsection\n" \
15131- ".pushsection .altinstr_replacement, \"ax\"\n" \
15132+ ".pushsection .altinstr_replacement, \"a\"\n" \
15133 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15134 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15135 ".popsection"
15136diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15137index efc3b22..85c4f3a 100644
15138--- a/arch/x86/include/asm/apic.h
15139+++ b/arch/x86/include/asm/apic.h
15140@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15141
15142 #ifdef CONFIG_X86_LOCAL_APIC
15143
15144-extern unsigned int apic_verbosity;
15145+extern int apic_verbosity;
15146 extern int local_apic_timer_c2_ok;
15147
15148 extern int disable_apic;
15149diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15150index 20370c6..a2eb9b0 100644
15151--- a/arch/x86/include/asm/apm.h
15152+++ b/arch/x86/include/asm/apm.h
15153@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15154 __asm__ __volatile__(APM_DO_ZERO_SEGS
15155 "pushl %%edi\n\t"
15156 "pushl %%ebp\n\t"
15157- "lcall *%%cs:apm_bios_entry\n\t"
15158+ "lcall *%%ss:apm_bios_entry\n\t"
15159 "setc %%al\n\t"
15160 "popl %%ebp\n\t"
15161 "popl %%edi\n\t"
15162@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15163 __asm__ __volatile__(APM_DO_ZERO_SEGS
15164 "pushl %%edi\n\t"
15165 "pushl %%ebp\n\t"
15166- "lcall *%%cs:apm_bios_entry\n\t"
15167+ "lcall *%%ss:apm_bios_entry\n\t"
15168 "setc %%bl\n\t"
15169 "popl %%ebp\n\t"
15170 "popl %%edi\n\t"
15171diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15172index 5e5cd12..51cdc93 100644
15173--- a/arch/x86/include/asm/atomic.h
15174+++ b/arch/x86/include/asm/atomic.h
15175@@ -28,6 +28,17 @@ static inline int atomic_read(const atomic_t *v)
15176 }
15177
15178 /**
15179+ * atomic_read_unchecked - read atomic variable
15180+ * @v: pointer of type atomic_unchecked_t
15181+ *
15182+ * Atomically reads the value of @v.
15183+ */
15184+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15185+{
15186+ return ACCESS_ONCE((v)->counter);
15187+}
15188+
15189+/**
15190 * atomic_set - set atomic variable
15191 * @v: pointer of type atomic_t
15192 * @i: required value
15193@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15194 }
15195
15196 /**
15197+ * atomic_set_unchecked - set atomic variable
15198+ * @v: pointer of type atomic_unchecked_t
15199+ * @i: required value
15200+ *
15201+ * Atomically sets the value of @v to @i.
15202+ */
15203+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15204+{
15205+ v->counter = i;
15206+}
15207+
15208+/**
15209 * atomic_add - add integer to atomic variable
15210 * @i: integer value to add
15211 * @v: pointer of type atomic_t
15212@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15213 */
15214 static inline void atomic_add(int i, atomic_t *v)
15215 {
15216- asm volatile(LOCK_PREFIX "addl %1,%0"
15217+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15218+
15219+#ifdef CONFIG_PAX_REFCOUNT
15220+ "jno 0f\n"
15221+ LOCK_PREFIX "subl %1,%0\n"
15222+ "int $4\n0:\n"
15223+ _ASM_EXTABLE(0b, 0b)
15224+#endif
15225+
15226+ : "+m" (v->counter)
15227+ : "ir" (i));
15228+}
15229+
15230+/**
15231+ * atomic_add_unchecked - add integer to atomic variable
15232+ * @i: integer value to add
15233+ * @v: pointer of type atomic_unchecked_t
15234+ *
15235+ * Atomically adds @i to @v.
15236+ */
15237+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15238+{
15239+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15240 : "+m" (v->counter)
15241 : "ir" (i));
15242 }
15243@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15244 */
15245 static inline void atomic_sub(int i, atomic_t *v)
15246 {
15247- asm volatile(LOCK_PREFIX "subl %1,%0"
15248+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15249+
15250+#ifdef CONFIG_PAX_REFCOUNT
15251+ "jno 0f\n"
15252+ LOCK_PREFIX "addl %1,%0\n"
15253+ "int $4\n0:\n"
15254+ _ASM_EXTABLE(0b, 0b)
15255+#endif
15256+
15257+ : "+m" (v->counter)
15258+ : "ir" (i));
15259+}
15260+
15261+/**
15262+ * atomic_sub_unchecked - subtract integer from atomic variable
15263+ * @i: integer value to subtract
15264+ * @v: pointer of type atomic_unchecked_t
15265+ *
15266+ * Atomically subtracts @i from @v.
15267+ */
15268+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15269+{
15270+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15271 : "+m" (v->counter)
15272 : "ir" (i));
15273 }
15274@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15275 */
15276 static inline int atomic_sub_and_test(int i, atomic_t *v)
15277 {
15278- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15279+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15280 }
15281
15282 /**
15283@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15284 */
15285 static inline void atomic_inc(atomic_t *v)
15286 {
15287- asm volatile(LOCK_PREFIX "incl %0"
15288+ asm volatile(LOCK_PREFIX "incl %0\n"
15289+
15290+#ifdef CONFIG_PAX_REFCOUNT
15291+ "jno 0f\n"
15292+ LOCK_PREFIX "decl %0\n"
15293+ "int $4\n0:\n"
15294+ _ASM_EXTABLE(0b, 0b)
15295+#endif
15296+
15297+ : "+m" (v->counter));
15298+}
15299+
15300+/**
15301+ * atomic_inc_unchecked - increment atomic variable
15302+ * @v: pointer of type atomic_unchecked_t
15303+ *
15304+ * Atomically increments @v by 1.
15305+ */
15306+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15307+{
15308+ asm volatile(LOCK_PREFIX "incl %0\n"
15309 : "+m" (v->counter));
15310 }
15311
15312@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
15313 */
15314 static inline void atomic_dec(atomic_t *v)
15315 {
15316- asm volatile(LOCK_PREFIX "decl %0"
15317+ asm volatile(LOCK_PREFIX "decl %0\n"
15318+
15319+#ifdef CONFIG_PAX_REFCOUNT
15320+ "jno 0f\n"
15321+ LOCK_PREFIX "incl %0\n"
15322+ "int $4\n0:\n"
15323+ _ASM_EXTABLE(0b, 0b)
15324+#endif
15325+
15326+ : "+m" (v->counter));
15327+}
15328+
15329+/**
15330+ * atomic_dec_unchecked - decrement atomic variable
15331+ * @v: pointer of type atomic_unchecked_t
15332+ *
15333+ * Atomically decrements @v by 1.
15334+ */
15335+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15336+{
15337+ asm volatile(LOCK_PREFIX "decl %0\n"
15338 : "+m" (v->counter));
15339 }
15340
15341@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
15342 */
15343 static inline int atomic_dec_and_test(atomic_t *v)
15344 {
15345- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15346+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15347 }
15348
15349 /**
15350@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15351 */
15352 static inline int atomic_inc_and_test(atomic_t *v)
15353 {
15354- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15355+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15356+}
15357+
15358+/**
15359+ * atomic_inc_and_test_unchecked - increment and test
15360+ * @v: pointer of type atomic_unchecked_t
15361+ *
15362+ * Atomically increments @v by 1
15363+ * and returns true if the result is zero, or false for all
15364+ * other cases.
15365+ */
15366+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15367+{
15368+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15369 }
15370
15371 /**
15372@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15373 */
15374 static inline int atomic_add_negative(int i, atomic_t *v)
15375 {
15376- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15377+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15378 }
15379
15380 /**
15381@@ -152,7 +272,19 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15382 *
15383 * Atomically adds @i to @v and returns @i + @v
15384 */
15385-static inline int atomic_add_return(int i, atomic_t *v)
15386+static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
15387+{
15388+ return i + xadd_check_overflow(&v->counter, i);
15389+}
15390+
15391+/**
15392+ * atomic_add_return_unchecked - add integer and return
15393+ * @i: integer value to add
15394+ * @v: pointer of type atomic_unchecked_t
15395+ *
15396+ * Atomically adds @i to @v and returns @i + @v
15397+ */
15398+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15399 {
15400 return i + xadd(&v->counter, i);
15401 }
15402@@ -164,15 +296,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
15403 *
15404 * Atomically subtracts @i from @v and returns @v - @i
15405 */
15406-static inline int atomic_sub_return(int i, atomic_t *v)
15407+static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
15408 {
15409 return atomic_add_return(-i, v);
15410 }
15411
15412 #define atomic_inc_return(v) (atomic_add_return(1, v))
15413+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15414+{
15415+ return atomic_add_return_unchecked(1, v);
15416+}
15417 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15418
15419-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15420+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15421+{
15422+ return cmpxchg(&v->counter, old, new);
15423+}
15424+
15425+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15426 {
15427 return cmpxchg(&v->counter, old, new);
15428 }
15429@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15430 return xchg(&v->counter, new);
15431 }
15432
15433+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15434+{
15435+ return xchg(&v->counter, new);
15436+}
15437+
15438 /**
15439 * __atomic_add_unless - add unless the number is already a given value
15440 * @v: pointer of type atomic_t
15441@@ -193,12 +339,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
15442 */
15443 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15444 {
15445- int c, old;
15446+ int c, old, new;
15447 c = atomic_read(v);
15448 for (;;) {
15449- if (unlikely(c == (u)))
15450+ if (unlikely(c == u))
15451 break;
15452- old = atomic_cmpxchg((v), c, c + (a));
15453+
15454+ asm volatile("addl %2,%0\n"
15455+
15456+#ifdef CONFIG_PAX_REFCOUNT
15457+ "jno 0f\n"
15458+ "subl %2,%0\n"
15459+ "int $4\n0:\n"
15460+ _ASM_EXTABLE(0b, 0b)
15461+#endif
15462+
15463+ : "=r" (new)
15464+ : "0" (c), "ir" (a));
15465+
15466+ old = atomic_cmpxchg(v, c, new);
15467 if (likely(old == c))
15468 break;
15469 c = old;
15470@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15471 }
15472
15473 /**
15474+ * atomic_inc_not_zero_hint - increment if not null
15475+ * @v: pointer of type atomic_t
15476+ * @hint: probable value of the atomic before the increment
15477+ *
15478+ * This version of atomic_inc_not_zero() gives a hint of probable
15479+ * value of the atomic. This helps processor to not read the memory
15480+ * before doing the atomic read/modify/write cycle, lowering
15481+ * number of bus transactions on some arches.
15482+ *
15483+ * Returns: 0 if increment was not done, 1 otherwise.
15484+ */
15485+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15486+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15487+{
15488+ int val, c = hint, new;
15489+
15490+ /* sanity test, should be removed by compiler if hint is a constant */
15491+ if (!hint)
15492+ return __atomic_add_unless(v, 1, 0);
15493+
15494+ do {
15495+ asm volatile("incl %0\n"
15496+
15497+#ifdef CONFIG_PAX_REFCOUNT
15498+ "jno 0f\n"
15499+ "decl %0\n"
15500+ "int $4\n0:\n"
15501+ _ASM_EXTABLE(0b, 0b)
15502+#endif
15503+
15504+ : "=r" (new)
15505+ : "0" (c));
15506+
15507+ val = atomic_cmpxchg(v, c, new);
15508+ if (val == c)
15509+ return 1;
15510+ c = val;
15511+ } while (c);
15512+
15513+ return 0;
15514+}
15515+
15516+/**
15517 * atomic_inc_short - increment of a short integer
15518 * @v: pointer to type int
15519 *
15520@@ -220,14 +422,37 @@ static inline short int atomic_inc_short(short int *v)
15521 }
15522
15523 /* These are x86-specific, used by some header files */
15524-#define atomic_clear_mask(mask, addr) \
15525- asm volatile(LOCK_PREFIX "andl %0,%1" \
15526- : : "r" (~(mask)), "m" (*(addr)) : "memory")
15527+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15528+{
15529+ asm volatile(LOCK_PREFIX "andl %1,%0"
15530+ : "+m" (v->counter)
15531+ : "r" (~(mask))
15532+ : "memory");
15533+}
15534
15535-#define atomic_set_mask(mask, addr) \
15536- asm volatile(LOCK_PREFIX "orl %0,%1" \
15537- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15538- : "memory")
15539+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15540+{
15541+ asm volatile(LOCK_PREFIX "andl %1,%0"
15542+ : "+m" (v->counter)
15543+ : "r" (~(mask))
15544+ : "memory");
15545+}
15546+
15547+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15548+{
15549+ asm volatile(LOCK_PREFIX "orl %1,%0"
15550+ : "+m" (v->counter)
15551+ : "r" (mask)
15552+ : "memory");
15553+}
15554+
15555+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15556+{
15557+ asm volatile(LOCK_PREFIX "orl %1,%0"
15558+ : "+m" (v->counter)
15559+ : "r" (mask)
15560+ : "memory");
15561+}
15562
15563 #ifdef CONFIG_X86_32
15564 # include <asm/atomic64_32.h>
15565diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15566index b154de7..bf18a5a 100644
15567--- a/arch/x86/include/asm/atomic64_32.h
15568+++ b/arch/x86/include/asm/atomic64_32.h
15569@@ -12,6 +12,14 @@ typedef struct {
15570 u64 __aligned(8) counter;
15571 } atomic64_t;
15572
15573+#ifdef CONFIG_PAX_REFCOUNT
15574+typedef struct {
15575+ u64 __aligned(8) counter;
15576+} atomic64_unchecked_t;
15577+#else
15578+typedef atomic64_t atomic64_unchecked_t;
15579+#endif
15580+
15581 #define ATOMIC64_INIT(val) { (val) }
15582
15583 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15584@@ -37,21 +45,31 @@ typedef struct {
15585 ATOMIC64_DECL_ONE(sym##_386)
15586
15587 ATOMIC64_DECL_ONE(add_386);
15588+ATOMIC64_DECL_ONE(add_unchecked_386);
15589 ATOMIC64_DECL_ONE(sub_386);
15590+ATOMIC64_DECL_ONE(sub_unchecked_386);
15591 ATOMIC64_DECL_ONE(inc_386);
15592+ATOMIC64_DECL_ONE(inc_unchecked_386);
15593 ATOMIC64_DECL_ONE(dec_386);
15594+ATOMIC64_DECL_ONE(dec_unchecked_386);
15595 #endif
15596
15597 #define alternative_atomic64(f, out, in...) \
15598 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15599
15600 ATOMIC64_DECL(read);
15601+ATOMIC64_DECL(read_unchecked);
15602 ATOMIC64_DECL(set);
15603+ATOMIC64_DECL(set_unchecked);
15604 ATOMIC64_DECL(xchg);
15605 ATOMIC64_DECL(add_return);
15606+ATOMIC64_DECL(add_return_unchecked);
15607 ATOMIC64_DECL(sub_return);
15608+ATOMIC64_DECL(sub_return_unchecked);
15609 ATOMIC64_DECL(inc_return);
15610+ATOMIC64_DECL(inc_return_unchecked);
15611 ATOMIC64_DECL(dec_return);
15612+ATOMIC64_DECL(dec_return_unchecked);
15613 ATOMIC64_DECL(dec_if_positive);
15614 ATOMIC64_DECL(inc_not_zero);
15615 ATOMIC64_DECL(add_unless);
15616@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15617 }
15618
15619 /**
15620+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15621+ * @p: pointer to type atomic64_unchecked_t
15622+ * @o: expected value
15623+ * @n: new value
15624+ *
15625+ * Atomically sets @v to @n if it was equal to @o and returns
15626+ * the old value.
15627+ */
15628+
15629+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15630+{
15631+ return cmpxchg64(&v->counter, o, n);
15632+}
15633+
15634+/**
15635 * atomic64_xchg - xchg atomic64 variable
15636 * @v: pointer to type atomic64_t
15637 * @n: value to assign
15638@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15639 }
15640
15641 /**
15642+ * atomic64_set_unchecked - set atomic64 variable
15643+ * @v: pointer to type atomic64_unchecked_t
15644+ * @n: value to assign
15645+ *
15646+ * Atomically sets the value of @v to @n.
15647+ */
15648+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15649+{
15650+ unsigned high = (unsigned)(i >> 32);
15651+ unsigned low = (unsigned)i;
15652+ alternative_atomic64(set, /* no output */,
15653+ "S" (v), "b" (low), "c" (high)
15654+ : "eax", "edx", "memory");
15655+}
15656+
15657+/**
15658 * atomic64_read - read atomic64 variable
15659 * @v: pointer to type atomic64_t
15660 *
15661@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15662 }
15663
15664 /**
15665+ * atomic64_read_unchecked - read atomic64 variable
15666+ * @v: pointer to type atomic64_unchecked_t
15667+ *
15668+ * Atomically reads the value of @v and returns it.
15669+ */
15670+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15671+{
15672+ long long r;
15673+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15674+ return r;
15675+ }
15676+
15677+/**
15678 * atomic64_add_return - add and return
15679 * @i: integer value to add
15680 * @v: pointer to type atomic64_t
15681@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15682 return i;
15683 }
15684
15685+/**
15686+ * atomic64_add_return_unchecked - add and return
15687+ * @i: integer value to add
15688+ * @v: pointer to type atomic64_unchecked_t
15689+ *
15690+ * Atomically adds @i to @v and returns @i + *@v
15691+ */
15692+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15693+{
15694+ alternative_atomic64(add_return_unchecked,
15695+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15696+ ASM_NO_INPUT_CLOBBER("memory"));
15697+ return i;
15698+}
15699+
15700 /*
15701 * Other variants with different arithmetic operators:
15702 */
15703@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15704 return a;
15705 }
15706
15707+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15708+{
15709+ long long a;
15710+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
15711+ "S" (v) : "memory", "ecx");
15712+ return a;
15713+}
15714+
15715 static inline long long atomic64_dec_return(atomic64_t *v)
15716 {
15717 long long a;
15718@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15719 }
15720
15721 /**
15722+ * atomic64_add_unchecked - add integer to atomic64 variable
15723+ * @i: integer value to add
15724+ * @v: pointer to type atomic64_unchecked_t
15725+ *
15726+ * Atomically adds @i to @v.
15727+ */
15728+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15729+{
15730+ __alternative_atomic64(add_unchecked, add_return_unchecked,
15731+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15732+ ASM_NO_INPUT_CLOBBER("memory"));
15733+ return i;
15734+}
15735+
15736+/**
15737 * atomic64_sub - subtract the atomic64 variable
15738 * @i: integer value to subtract
15739 * @v: pointer to type atomic64_t
15740diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15741index f8d273e..02f39f3 100644
15742--- a/arch/x86/include/asm/atomic64_64.h
15743+++ b/arch/x86/include/asm/atomic64_64.h
15744@@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
15745 }
15746
15747 /**
15748+ * atomic64_read_unchecked - read atomic64 variable
15749+ * @v: pointer of type atomic64_unchecked_t
15750+ *
15751+ * Atomically reads the value of @v.
15752+ * Doesn't imply a read memory barrier.
15753+ */
15754+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15755+{
15756+ return ACCESS_ONCE((v)->counter);
15757+}
15758+
15759+/**
15760 * atomic64_set - set atomic64 variable
15761 * @v: pointer to type atomic64_t
15762 * @i: required value
15763@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15764 }
15765
15766 /**
15767+ * atomic64_set_unchecked - set atomic64 variable
15768+ * @v: pointer to type atomic64_unchecked_t
15769+ * @i: required value
15770+ *
15771+ * Atomically sets the value of @v to @i.
15772+ */
15773+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15774+{
15775+ v->counter = i;
15776+}
15777+
15778+/**
15779 * atomic64_add - add integer to atomic64 variable
15780 * @i: integer value to add
15781 * @v: pointer to type atomic64_t
15782@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
15783 */
15784 static inline void atomic64_add(long i, atomic64_t *v)
15785 {
15786+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
15787+
15788+#ifdef CONFIG_PAX_REFCOUNT
15789+ "jno 0f\n"
15790+ LOCK_PREFIX "subq %1,%0\n"
15791+ "int $4\n0:\n"
15792+ _ASM_EXTABLE(0b, 0b)
15793+#endif
15794+
15795+ : "=m" (v->counter)
15796+ : "er" (i), "m" (v->counter));
15797+}
15798+
15799+/**
15800+ * atomic64_add_unchecked - add integer to atomic64 variable
15801+ * @i: integer value to add
15802+ * @v: pointer to type atomic64_unchecked_t
15803+ *
15804+ * Atomically adds @i to @v.
15805+ */
15806+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
15807+{
15808 asm volatile(LOCK_PREFIX "addq %1,%0"
15809 : "=m" (v->counter)
15810 : "er" (i), "m" (v->counter));
15811@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
15812 */
15813 static inline void atomic64_sub(long i, atomic64_t *v)
15814 {
15815- asm volatile(LOCK_PREFIX "subq %1,%0"
15816+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15817+
15818+#ifdef CONFIG_PAX_REFCOUNT
15819+ "jno 0f\n"
15820+ LOCK_PREFIX "addq %1,%0\n"
15821+ "int $4\n0:\n"
15822+ _ASM_EXTABLE(0b, 0b)
15823+#endif
15824+
15825+ : "=m" (v->counter)
15826+ : "er" (i), "m" (v->counter));
15827+}
15828+
15829+/**
15830+ * atomic64_sub_unchecked - subtract the atomic64 variable
15831+ * @i: integer value to subtract
15832+ * @v: pointer to type atomic64_unchecked_t
15833+ *
15834+ * Atomically subtracts @i from @v.
15835+ */
15836+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
15837+{
15838+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15839 : "=m" (v->counter)
15840 : "er" (i), "m" (v->counter));
15841 }
15842@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
15843 */
15844 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15845 {
15846- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
15847+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
15848 }
15849
15850 /**
15851@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15852 */
15853 static inline void atomic64_inc(atomic64_t *v)
15854 {
15855+ asm volatile(LOCK_PREFIX "incq %0\n"
15856+
15857+#ifdef CONFIG_PAX_REFCOUNT
15858+ "jno 0f\n"
15859+ LOCK_PREFIX "decq %0\n"
15860+ "int $4\n0:\n"
15861+ _ASM_EXTABLE(0b, 0b)
15862+#endif
15863+
15864+ : "=m" (v->counter)
15865+ : "m" (v->counter));
15866+}
15867+
15868+/**
15869+ * atomic64_inc_unchecked - increment atomic64 variable
15870+ * @v: pointer to type atomic64_unchecked_t
15871+ *
15872+ * Atomically increments @v by 1.
15873+ */
15874+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15875+{
15876 asm volatile(LOCK_PREFIX "incq %0"
15877 : "=m" (v->counter)
15878 : "m" (v->counter));
15879@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
15880 */
15881 static inline void atomic64_dec(atomic64_t *v)
15882 {
15883- asm volatile(LOCK_PREFIX "decq %0"
15884+ asm volatile(LOCK_PREFIX "decq %0\n"
15885+
15886+#ifdef CONFIG_PAX_REFCOUNT
15887+ "jno 0f\n"
15888+ LOCK_PREFIX "incq %0\n"
15889+ "int $4\n0:\n"
15890+ _ASM_EXTABLE(0b, 0b)
15891+#endif
15892+
15893+ : "=m" (v->counter)
15894+ : "m" (v->counter));
15895+}
15896+
15897+/**
15898+ * atomic64_dec_unchecked - decrement atomic64 variable
15899+ * @v: pointer to type atomic64_t
15900+ *
15901+ * Atomically decrements @v by 1.
15902+ */
15903+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15904+{
15905+ asm volatile(LOCK_PREFIX "decq %0\n"
15906 : "=m" (v->counter)
15907 : "m" (v->counter));
15908 }
15909@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
15910 */
15911 static inline int atomic64_dec_and_test(atomic64_t *v)
15912 {
15913- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
15914+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
15915 }
15916
15917 /**
15918@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
15919 */
15920 static inline int atomic64_inc_and_test(atomic64_t *v)
15921 {
15922- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
15923+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
15924 }
15925
15926 /**
15927@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
15928 */
15929 static inline int atomic64_add_negative(long i, atomic64_t *v)
15930 {
15931- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
15932+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
15933 }
15934
15935 /**
15936@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
15937 */
15938 static inline long atomic64_add_return(long i, atomic64_t *v)
15939 {
15940+ return i + xadd_check_overflow(&v->counter, i);
15941+}
15942+
15943+/**
15944+ * atomic64_add_return_unchecked - add and return
15945+ * @i: integer value to add
15946+ * @v: pointer to type atomic64_unchecked_t
15947+ *
15948+ * Atomically adds @i to @v and returns @i + @v
15949+ */
15950+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
15951+{
15952 return i + xadd(&v->counter, i);
15953 }
15954
15955@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
15956 }
15957
15958 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
15959+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15960+{
15961+ return atomic64_add_return_unchecked(1, v);
15962+}
15963 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
15964
15965 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15966@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15967 return cmpxchg(&v->counter, old, new);
15968 }
15969
15970+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
15971+{
15972+ return cmpxchg(&v->counter, old, new);
15973+}
15974+
15975 static inline long atomic64_xchg(atomic64_t *v, long new)
15976 {
15977 return xchg(&v->counter, new);
15978@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
15979 */
15980 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
15981 {
15982- long c, old;
15983+ long c, old, new;
15984 c = atomic64_read(v);
15985 for (;;) {
15986- if (unlikely(c == (u)))
15987+ if (unlikely(c == u))
15988 break;
15989- old = atomic64_cmpxchg((v), c, c + (a));
15990+
15991+ asm volatile("add %2,%0\n"
15992+
15993+#ifdef CONFIG_PAX_REFCOUNT
15994+ "jno 0f\n"
15995+ "sub %2,%0\n"
15996+ "int $4\n0:\n"
15997+ _ASM_EXTABLE(0b, 0b)
15998+#endif
15999+
16000+ : "=r" (new)
16001+ : "0" (c), "ir" (a));
16002+
16003+ old = atomic64_cmpxchg(v, c, new);
16004 if (likely(old == c))
16005 break;
16006 c = old;
16007 }
16008- return c != (u);
16009+ return c != u;
16010 }
16011
16012 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
16013diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
16014index 2ab1eb3..1e8cc5d 100644
16015--- a/arch/x86/include/asm/barrier.h
16016+++ b/arch/x86/include/asm/barrier.h
16017@@ -57,7 +57,7 @@
16018 do { \
16019 compiletime_assert_atomic_type(*p); \
16020 smp_mb(); \
16021- ACCESS_ONCE(*p) = (v); \
16022+ ACCESS_ONCE_RW(*p) = (v); \
16023 } while (0)
16024
16025 #define smp_load_acquire(p) \
16026@@ -74,7 +74,7 @@ do { \
16027 do { \
16028 compiletime_assert_atomic_type(*p); \
16029 barrier(); \
16030- ACCESS_ONCE(*p) = (v); \
16031+ ACCESS_ONCE_RW(*p) = (v); \
16032 } while (0)
16033
16034 #define smp_load_acquire(p) \
16035diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16036index cfe3b95..d01b118 100644
16037--- a/arch/x86/include/asm/bitops.h
16038+++ b/arch/x86/include/asm/bitops.h
16039@@ -50,7 +50,7 @@
16040 * a mask operation on a byte.
16041 */
16042 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16043-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16044+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16045 #define CONST_MASK(nr) (1 << ((nr) & 7))
16046
16047 /**
16048@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16049 */
16050 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16051 {
16052- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16053+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16054 }
16055
16056 /**
16057@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16058 */
16059 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16060 {
16061- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16062+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16063 }
16064
16065 /**
16066@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16067 */
16068 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16069 {
16070- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16071+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16072 }
16073
16074 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16075@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16076 *
16077 * Undefined if no bit exists, so code should check against 0 first.
16078 */
16079-static inline unsigned long __ffs(unsigned long word)
16080+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16081 {
16082 asm("rep; bsf %1,%0"
16083 : "=r" (word)
16084@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16085 *
16086 * Undefined if no zero exists, so code should check against ~0UL first.
16087 */
16088-static inline unsigned long ffz(unsigned long word)
16089+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16090 {
16091 asm("rep; bsf %1,%0"
16092 : "=r" (word)
16093@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16094 *
16095 * Undefined if no set bit exists, so code should check against 0 first.
16096 */
16097-static inline unsigned long __fls(unsigned long word)
16098+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16099 {
16100 asm("bsr %1,%0"
16101 : "=r" (word)
16102@@ -434,7 +434,7 @@ static inline int ffs(int x)
16103 * set bit if value is nonzero. The last (most significant) bit is
16104 * at position 32.
16105 */
16106-static inline int fls(int x)
16107+static inline int __intentional_overflow(-1) fls(int x)
16108 {
16109 int r;
16110
16111@@ -476,7 +476,7 @@ static inline int fls(int x)
16112 * at position 64.
16113 */
16114 #ifdef CONFIG_X86_64
16115-static __always_inline int fls64(__u64 x)
16116+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16117 {
16118 int bitpos = -1;
16119 /*
16120diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16121index 4fa687a..60f2d39 100644
16122--- a/arch/x86/include/asm/boot.h
16123+++ b/arch/x86/include/asm/boot.h
16124@@ -6,10 +6,15 @@
16125 #include <uapi/asm/boot.h>
16126
16127 /* Physical address where kernel should be loaded. */
16128-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16129+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16130 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16131 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16132
16133+#ifndef __ASSEMBLY__
16134+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16135+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16136+#endif
16137+
16138 /* Minimum kernel alignment, as a power of two */
16139 #ifdef CONFIG_X86_64
16140 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
16141diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16142index 48f99f1..d78ebf9 100644
16143--- a/arch/x86/include/asm/cache.h
16144+++ b/arch/x86/include/asm/cache.h
16145@@ -5,12 +5,13 @@
16146
16147 /* L1 cache line size */
16148 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16149-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16150+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16151
16152 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16153+#define __read_only __attribute__((__section__(".data..read_only")))
16154
16155 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16156-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16157+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16158
16159 #ifdef CONFIG_X86_VSMP
16160 #ifdef CONFIG_SMP
16161diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16162index 1f1297b..72b8439 100644
16163--- a/arch/x86/include/asm/calling.h
16164+++ b/arch/x86/include/asm/calling.h
16165@@ -82,106 +82,117 @@ For 32-bit we have the following conventions - kernel is built with
16166 #define RSP 152
16167 #define SS 160
16168
16169-#define ARGOFFSET R11
16170+#define ARGOFFSET R15
16171
16172 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
16173- subq $9*8+\addskip, %rsp
16174- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16175- movq_cfi rdi, 8*8
16176- movq_cfi rsi, 7*8
16177- movq_cfi rdx, 6*8
16178+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16179+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16180+ movq_cfi rdi, RDI
16181+ movq_cfi rsi, RSI
16182+ movq_cfi rdx, RDX
16183
16184 .if \save_rcx
16185- movq_cfi rcx, 5*8
16186+ movq_cfi rcx, RCX
16187 .endif
16188
16189 .if \rax_enosys
16190- movq $-ENOSYS, 4*8(%rsp)
16191+ movq $-ENOSYS, RAX(%rsp)
16192 .else
16193- movq_cfi rax, 4*8
16194+ movq_cfi rax, RAX
16195 .endif
16196
16197 .if \save_r891011
16198- movq_cfi r8, 3*8
16199- movq_cfi r9, 2*8
16200- movq_cfi r10, 1*8
16201- movq_cfi r11, 0*8
16202+ movq_cfi r8, R8
16203+ movq_cfi r9, R9
16204+ movq_cfi r10, R10
16205+ movq_cfi r11, R11
16206 .endif
16207
16208+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16209+ movq_cfi r12, R12
16210+#endif
16211+
16212 .endm
16213
16214-#define ARG_SKIP (9*8)
16215+#define ARG_SKIP ORIG_RAX
16216
16217 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16218 rstor_r8910=1, rstor_rdx=1
16219+
16220+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16221+ movq_cfi_restore R12, r12
16222+#endif
16223+
16224 .if \rstor_r11
16225- movq_cfi_restore 0*8, r11
16226+ movq_cfi_restore R11, r11
16227 .endif
16228
16229 .if \rstor_r8910
16230- movq_cfi_restore 1*8, r10
16231- movq_cfi_restore 2*8, r9
16232- movq_cfi_restore 3*8, r8
16233+ movq_cfi_restore R10, r10
16234+ movq_cfi_restore R9, r9
16235+ movq_cfi_restore R8, r8
16236 .endif
16237
16238 .if \rstor_rax
16239- movq_cfi_restore 4*8, rax
16240+ movq_cfi_restore RAX, rax
16241 .endif
16242
16243 .if \rstor_rcx
16244- movq_cfi_restore 5*8, rcx
16245+ movq_cfi_restore RCX, rcx
16246 .endif
16247
16248 .if \rstor_rdx
16249- movq_cfi_restore 6*8, rdx
16250+ movq_cfi_restore RDX, rdx
16251 .endif
16252
16253- movq_cfi_restore 7*8, rsi
16254- movq_cfi_restore 8*8, rdi
16255+ movq_cfi_restore RSI, rsi
16256+ movq_cfi_restore RDI, rdi
16257
16258- .if ARG_SKIP+\addskip > 0
16259- addq $ARG_SKIP+\addskip, %rsp
16260- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16261+ .if ORIG_RAX+\addskip > 0
16262+ addq $ORIG_RAX+\addskip, %rsp
16263+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16264 .endif
16265 .endm
16266
16267- .macro LOAD_ARGS offset, skiprax=0
16268- movq \offset(%rsp), %r11
16269- movq \offset+8(%rsp), %r10
16270- movq \offset+16(%rsp), %r9
16271- movq \offset+24(%rsp), %r8
16272- movq \offset+40(%rsp), %rcx
16273- movq \offset+48(%rsp), %rdx
16274- movq \offset+56(%rsp), %rsi
16275- movq \offset+64(%rsp), %rdi
16276+ .macro LOAD_ARGS skiprax=0
16277+ movq R11(%rsp), %r11
16278+ movq R10(%rsp), %r10
16279+ movq R9(%rsp), %r9
16280+ movq R8(%rsp), %r8
16281+ movq RCX(%rsp), %rcx
16282+ movq RDX(%rsp), %rdx
16283+ movq RSI(%rsp), %rsi
16284+ movq RDI(%rsp), %rdi
16285 .if \skiprax
16286 .else
16287- movq \offset+72(%rsp), %rax
16288+ movq ORIG_RAX(%rsp), %rax
16289 .endif
16290 .endm
16291
16292-#define REST_SKIP (6*8)
16293-
16294 .macro SAVE_REST
16295- subq $REST_SKIP, %rsp
16296- CFI_ADJUST_CFA_OFFSET REST_SKIP
16297- movq_cfi rbx, 5*8
16298- movq_cfi rbp, 4*8
16299- movq_cfi r12, 3*8
16300- movq_cfi r13, 2*8
16301- movq_cfi r14, 1*8
16302- movq_cfi r15, 0*8
16303+ movq_cfi rbx, RBX
16304+ movq_cfi rbp, RBP
16305+
16306+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16307+ movq_cfi r12, R12
16308+#endif
16309+
16310+ movq_cfi r13, R13
16311+ movq_cfi r14, R14
16312+ movq_cfi r15, R15
16313 .endm
16314
16315 .macro RESTORE_REST
16316- movq_cfi_restore 0*8, r15
16317- movq_cfi_restore 1*8, r14
16318- movq_cfi_restore 2*8, r13
16319- movq_cfi_restore 3*8, r12
16320- movq_cfi_restore 4*8, rbp
16321- movq_cfi_restore 5*8, rbx
16322- addq $REST_SKIP, %rsp
16323- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
16324+ movq_cfi_restore R15, r15
16325+ movq_cfi_restore R14, r14
16326+ movq_cfi_restore R13, r13
16327+
16328+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16329+ movq_cfi_restore R12, r12
16330+#endif
16331+
16332+ movq_cfi_restore RBP, rbp
16333+ movq_cfi_restore RBX, rbx
16334 .endm
16335
16336 .macro SAVE_ALL
16337diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
16338index f50de69..2b0a458 100644
16339--- a/arch/x86/include/asm/checksum_32.h
16340+++ b/arch/x86/include/asm/checksum_32.h
16341@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
16342 int len, __wsum sum,
16343 int *src_err_ptr, int *dst_err_ptr);
16344
16345+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
16346+ int len, __wsum sum,
16347+ int *src_err_ptr, int *dst_err_ptr);
16348+
16349+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
16350+ int len, __wsum sum,
16351+ int *src_err_ptr, int *dst_err_ptr);
16352+
16353 /*
16354 * Note: when you get a NULL pointer exception here this means someone
16355 * passed in an incorrect kernel address to one of these functions.
16356@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
16357
16358 might_sleep();
16359 stac();
16360- ret = csum_partial_copy_generic((__force void *)src, dst,
16361+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16362 len, sum, err_ptr, NULL);
16363 clac();
16364
16365@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16366 might_sleep();
16367 if (access_ok(VERIFY_WRITE, dst, len)) {
16368 stac();
16369- ret = csum_partial_copy_generic(src, (__force void *)dst,
16370+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16371 len, sum, NULL, err_ptr);
16372 clac();
16373 return ret;
16374diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16375index 99c105d7..2f667ac 100644
16376--- a/arch/x86/include/asm/cmpxchg.h
16377+++ b/arch/x86/include/asm/cmpxchg.h
16378@@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
16379 __compiletime_error("Bad argument size for cmpxchg");
16380 extern void __xadd_wrong_size(void)
16381 __compiletime_error("Bad argument size for xadd");
16382+extern void __xadd_check_overflow_wrong_size(void)
16383+ __compiletime_error("Bad argument size for xadd_check_overflow");
16384 extern void __add_wrong_size(void)
16385 __compiletime_error("Bad argument size for add");
16386+extern void __add_check_overflow_wrong_size(void)
16387+ __compiletime_error("Bad argument size for add_check_overflow");
16388
16389 /*
16390 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
16391@@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
16392 __ret; \
16393 })
16394
16395+#ifdef CONFIG_PAX_REFCOUNT
16396+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16397+ ({ \
16398+ __typeof__ (*(ptr)) __ret = (arg); \
16399+ switch (sizeof(*(ptr))) { \
16400+ case __X86_CASE_L: \
16401+ asm volatile (lock #op "l %0, %1\n" \
16402+ "jno 0f\n" \
16403+ "mov %0,%1\n" \
16404+ "int $4\n0:\n" \
16405+ _ASM_EXTABLE(0b, 0b) \
16406+ : "+r" (__ret), "+m" (*(ptr)) \
16407+ : : "memory", "cc"); \
16408+ break; \
16409+ case __X86_CASE_Q: \
16410+ asm volatile (lock #op "q %q0, %1\n" \
16411+ "jno 0f\n" \
16412+ "mov %0,%1\n" \
16413+ "int $4\n0:\n" \
16414+ _ASM_EXTABLE(0b, 0b) \
16415+ : "+r" (__ret), "+m" (*(ptr)) \
16416+ : : "memory", "cc"); \
16417+ break; \
16418+ default: \
16419+ __ ## op ## _check_overflow_wrong_size(); \
16420+ } \
16421+ __ret; \
16422+ })
16423+#else
16424+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
16425+#endif
16426+
16427 /*
16428 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16429 * Since this is generally used to protect other memory information, we
16430@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
16431 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16432 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16433
16434+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16435+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16436+
16437 #define __add(ptr, inc, lock) \
16438 ({ \
16439 __typeof__ (*(ptr)) __ret = (inc); \
16440diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16441index 59c6c40..5e0b22c 100644
16442--- a/arch/x86/include/asm/compat.h
16443+++ b/arch/x86/include/asm/compat.h
16444@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16445 typedef u32 compat_uint_t;
16446 typedef u32 compat_ulong_t;
16447 typedef u64 __attribute__((aligned(4))) compat_u64;
16448-typedef u32 compat_uptr_t;
16449+typedef u32 __user compat_uptr_t;
16450
16451 struct compat_timespec {
16452 compat_time_t tv_sec;
16453diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16454index 90a5485..43b6211 100644
16455--- a/arch/x86/include/asm/cpufeature.h
16456+++ b/arch/x86/include/asm/cpufeature.h
16457@@ -213,7 +213,7 @@
16458 #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
16459 #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
16460 #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
16461-
16462+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
16463
16464 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16465 #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16466@@ -221,7 +221,7 @@
16467 #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
16468 #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
16469 #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
16470-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
16471+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
16472 #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
16473 #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16474 #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
16475@@ -390,6 +390,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
16476 #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
16477 #define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
16478 #define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
16479+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16480
16481 #if __GNUC__ >= 4
16482 extern void warn_pre_alternatives(void);
16483@@ -441,7 +442,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16484
16485 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16486 t_warn:
16487- warn_pre_alternatives();
16488+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16489+ warn_pre_alternatives();
16490 return false;
16491 #endif
16492
16493@@ -461,7 +463,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16494 ".section .discard,\"aw\",@progbits\n"
16495 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16496 ".previous\n"
16497- ".section .altinstr_replacement,\"ax\"\n"
16498+ ".section .altinstr_replacement,\"a\"\n"
16499 "3: movb $1,%0\n"
16500 "4:\n"
16501 ".previous\n"
16502@@ -498,7 +500,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16503 " .byte 2b - 1b\n" /* src len */
16504 " .byte 4f - 3f\n" /* repl len */
16505 ".previous\n"
16506- ".section .altinstr_replacement,\"ax\"\n"
16507+ ".section .altinstr_replacement,\"a\"\n"
16508 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
16509 "4:\n"
16510 ".previous\n"
16511@@ -531,7 +533,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16512 ".section .discard,\"aw\",@progbits\n"
16513 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16514 ".previous\n"
16515- ".section .altinstr_replacement,\"ax\"\n"
16516+ ".section .altinstr_replacement,\"a\"\n"
16517 "3: movb $0,%0\n"
16518 "4:\n"
16519 ".previous\n"
16520@@ -545,7 +547,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16521 ".section .discard,\"aw\",@progbits\n"
16522 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16523 ".previous\n"
16524- ".section .altinstr_replacement,\"ax\"\n"
16525+ ".section .altinstr_replacement,\"a\"\n"
16526 "5: movb $1,%0\n"
16527 "6:\n"
16528 ".previous\n"
16529diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16530index a94b82e..59ecefa 100644
16531--- a/arch/x86/include/asm/desc.h
16532+++ b/arch/x86/include/asm/desc.h
16533@@ -4,6 +4,7 @@
16534 #include <asm/desc_defs.h>
16535 #include <asm/ldt.h>
16536 #include <asm/mmu.h>
16537+#include <asm/pgtable.h>
16538
16539 #include <linux/smp.h>
16540 #include <linux/percpu.h>
16541@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16542
16543 desc->type = (info->read_exec_only ^ 1) << 1;
16544 desc->type |= info->contents << 2;
16545+ desc->type |= info->seg_not_present ^ 1;
16546
16547 desc->s = 1;
16548 desc->dpl = 0x3;
16549@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16550 }
16551
16552 extern struct desc_ptr idt_descr;
16553-extern gate_desc idt_table[];
16554-extern struct desc_ptr debug_idt_descr;
16555-extern gate_desc debug_idt_table[];
16556-
16557-struct gdt_page {
16558- struct desc_struct gdt[GDT_ENTRIES];
16559-} __attribute__((aligned(PAGE_SIZE)));
16560-
16561-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16562+extern gate_desc idt_table[IDT_ENTRIES];
16563+extern const struct desc_ptr debug_idt_descr;
16564+extern gate_desc debug_idt_table[IDT_ENTRIES];
16565
16566+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16567 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16568 {
16569- return per_cpu(gdt_page, cpu).gdt;
16570+ return cpu_gdt_table[cpu];
16571 }
16572
16573 #ifdef CONFIG_X86_64
16574@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16575 unsigned long base, unsigned dpl, unsigned flags,
16576 unsigned short seg)
16577 {
16578- gate->a = (seg << 16) | (base & 0xffff);
16579- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16580+ gate->gate.offset_low = base;
16581+ gate->gate.seg = seg;
16582+ gate->gate.reserved = 0;
16583+ gate->gate.type = type;
16584+ gate->gate.s = 0;
16585+ gate->gate.dpl = dpl;
16586+ gate->gate.p = 1;
16587+ gate->gate.offset_high = base >> 16;
16588 }
16589
16590 #endif
16591@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16592
16593 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16594 {
16595+ pax_open_kernel();
16596 memcpy(&idt[entry], gate, sizeof(*gate));
16597+ pax_close_kernel();
16598 }
16599
16600 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16601 {
16602+ pax_open_kernel();
16603 memcpy(&ldt[entry], desc, 8);
16604+ pax_close_kernel();
16605 }
16606
16607 static inline void
16608@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16609 default: size = sizeof(*gdt); break;
16610 }
16611
16612+ pax_open_kernel();
16613 memcpy(&gdt[entry], desc, size);
16614+ pax_close_kernel();
16615 }
16616
16617 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16618@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16619
16620 static inline void native_load_tr_desc(void)
16621 {
16622+ pax_open_kernel();
16623 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16624+ pax_close_kernel();
16625 }
16626
16627 static inline void native_load_gdt(const struct desc_ptr *dtr)
16628@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16629 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16630 unsigned int i;
16631
16632+ pax_open_kernel();
16633 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16634 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16635+ pax_close_kernel();
16636 }
16637
16638 /* This intentionally ignores lm, since 32-bit apps don't have that field. */
16639@@ -295,7 +308,7 @@ static inline void load_LDT(mm_context_t *pc)
16640 preempt_enable();
16641 }
16642
16643-static inline unsigned long get_desc_base(const struct desc_struct *desc)
16644+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16645 {
16646 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16647 }
16648@@ -319,7 +332,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16649 }
16650
16651 #ifdef CONFIG_X86_64
16652-static inline void set_nmi_gate(int gate, void *addr)
16653+static inline void set_nmi_gate(int gate, const void *addr)
16654 {
16655 gate_desc s;
16656
16657@@ -329,14 +342,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16658 #endif
16659
16660 #ifdef CONFIG_TRACING
16661-extern struct desc_ptr trace_idt_descr;
16662-extern gate_desc trace_idt_table[];
16663+extern const struct desc_ptr trace_idt_descr;
16664+extern gate_desc trace_idt_table[IDT_ENTRIES];
16665 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16666 {
16667 write_idt_entry(trace_idt_table, entry, gate);
16668 }
16669
16670-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16671+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16672 unsigned dpl, unsigned ist, unsigned seg)
16673 {
16674 gate_desc s;
16675@@ -356,7 +369,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16676 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16677 #endif
16678
16679-static inline void _set_gate(int gate, unsigned type, void *addr,
16680+static inline void _set_gate(int gate, unsigned type, const void *addr,
16681 unsigned dpl, unsigned ist, unsigned seg)
16682 {
16683 gate_desc s;
16684@@ -379,9 +392,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16685 #define set_intr_gate(n, addr) \
16686 do { \
16687 BUG_ON((unsigned)n > 0xFF); \
16688- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16689+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16690 __KERNEL_CS); \
16691- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16692+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16693 0, 0, __KERNEL_CS); \
16694 } while (0)
16695
16696@@ -409,19 +422,19 @@ static inline void alloc_system_vector(int vector)
16697 /*
16698 * This routine sets up an interrupt gate at directory privilege level 3.
16699 */
16700-static inline void set_system_intr_gate(unsigned int n, void *addr)
16701+static inline void set_system_intr_gate(unsigned int n, const void *addr)
16702 {
16703 BUG_ON((unsigned)n > 0xFF);
16704 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16705 }
16706
16707-static inline void set_system_trap_gate(unsigned int n, void *addr)
16708+static inline void set_system_trap_gate(unsigned int n, const void *addr)
16709 {
16710 BUG_ON((unsigned)n > 0xFF);
16711 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16712 }
16713
16714-static inline void set_trap_gate(unsigned int n, void *addr)
16715+static inline void set_trap_gate(unsigned int n, const void *addr)
16716 {
16717 BUG_ON((unsigned)n > 0xFF);
16718 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16719@@ -430,16 +443,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16720 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16721 {
16722 BUG_ON((unsigned)n > 0xFF);
16723- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16724+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16725 }
16726
16727-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16728+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16729 {
16730 BUG_ON((unsigned)n > 0xFF);
16731 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16732 }
16733
16734-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16735+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16736 {
16737 BUG_ON((unsigned)n > 0xFF);
16738 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16739@@ -511,4 +524,17 @@ static inline void load_current_idt(void)
16740 else
16741 load_idt((const struct desc_ptr *)&idt_descr);
16742 }
16743+
16744+#ifdef CONFIG_X86_32
16745+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16746+{
16747+ struct desc_struct d;
16748+
16749+ if (likely(limit))
16750+ limit = (limit - 1UL) >> PAGE_SHIFT;
16751+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
16752+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16753+}
16754+#endif
16755+
16756 #endif /* _ASM_X86_DESC_H */
16757diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
16758index 278441f..b95a174 100644
16759--- a/arch/x86/include/asm/desc_defs.h
16760+++ b/arch/x86/include/asm/desc_defs.h
16761@@ -31,6 +31,12 @@ struct desc_struct {
16762 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
16763 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
16764 };
16765+ struct {
16766+ u16 offset_low;
16767+ u16 seg;
16768+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
16769+ unsigned offset_high: 16;
16770+ } gate;
16771 };
16772 } __attribute__((packed));
16773
16774diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
16775index ced283a..ffe04cc 100644
16776--- a/arch/x86/include/asm/div64.h
16777+++ b/arch/x86/include/asm/div64.h
16778@@ -39,7 +39,7 @@
16779 __mod; \
16780 })
16781
16782-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16783+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16784 {
16785 union {
16786 u64 v64;
16787diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
16788index ca3347a..1a5082a 100644
16789--- a/arch/x86/include/asm/elf.h
16790+++ b/arch/x86/include/asm/elf.h
16791@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
16792
16793 #include <asm/vdso.h>
16794
16795-#ifdef CONFIG_X86_64
16796-extern unsigned int vdso64_enabled;
16797-#endif
16798 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
16799 extern unsigned int vdso32_enabled;
16800 #endif
16801@@ -249,7 +246,25 @@ extern int force_personality32;
16802 the loader. We need to make sure that it is out of the way of the program
16803 that it will "exec", and that there is sufficient room for the brk. */
16804
16805+#ifdef CONFIG_PAX_SEGMEXEC
16806+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
16807+#else
16808 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
16809+#endif
16810+
16811+#ifdef CONFIG_PAX_ASLR
16812+#ifdef CONFIG_X86_32
16813+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
16814+
16815+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16816+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16817+#else
16818+#define PAX_ELF_ET_DYN_BASE 0x400000UL
16819+
16820+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16821+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16822+#endif
16823+#endif
16824
16825 /* This yields a mask that user programs can use to figure out what
16826 instruction set this CPU supports. This could be done in user space,
16827@@ -298,17 +313,13 @@ do { \
16828
16829 #define ARCH_DLINFO \
16830 do { \
16831- if (vdso64_enabled) \
16832- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16833- (unsigned long __force)current->mm->context.vdso); \
16834+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16835 } while (0)
16836
16837 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
16838 #define ARCH_DLINFO_X32 \
16839 do { \
16840- if (vdso64_enabled) \
16841- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16842- (unsigned long __force)current->mm->context.vdso); \
16843+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16844 } while (0)
16845
16846 #define AT_SYSINFO 32
16847@@ -323,10 +334,10 @@ else \
16848
16849 #endif /* !CONFIG_X86_32 */
16850
16851-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
16852+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
16853
16854 #define VDSO_ENTRY \
16855- ((unsigned long)current->mm->context.vdso + \
16856+ (current->mm->context.vdso + \
16857 selected_vdso32->sym___kernel_vsyscall)
16858
16859 struct linux_binprm;
16860@@ -338,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
16861 int uses_interp);
16862 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
16863
16864-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
16865-#define arch_randomize_brk arch_randomize_brk
16866-
16867 /*
16868 * True on X86_32 or when emulating IA32 on X86_64
16869 */
16870diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
16871index 77a99ac..39ff7f5 100644
16872--- a/arch/x86/include/asm/emergency-restart.h
16873+++ b/arch/x86/include/asm/emergency-restart.h
16874@@ -1,6 +1,6 @@
16875 #ifndef _ASM_X86_EMERGENCY_RESTART_H
16876 #define _ASM_X86_EMERGENCY_RESTART_H
16877
16878-extern void machine_emergency_restart(void);
16879+extern void machine_emergency_restart(void) __noreturn;
16880
16881 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
16882diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
16883index 1c7eefe..d0e4702 100644
16884--- a/arch/x86/include/asm/floppy.h
16885+++ b/arch/x86/include/asm/floppy.h
16886@@ -229,18 +229,18 @@ static struct fd_routine_l {
16887 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
16888 } fd_routine[] = {
16889 {
16890- request_dma,
16891- free_dma,
16892- get_dma_residue,
16893- dma_mem_alloc,
16894- hard_dma_setup
16895+ ._request_dma = request_dma,
16896+ ._free_dma = free_dma,
16897+ ._get_dma_residue = get_dma_residue,
16898+ ._dma_mem_alloc = dma_mem_alloc,
16899+ ._dma_setup = hard_dma_setup
16900 },
16901 {
16902- vdma_request_dma,
16903- vdma_nop,
16904- vdma_get_dma_residue,
16905- vdma_mem_alloc,
16906- vdma_dma_setup
16907+ ._request_dma = vdma_request_dma,
16908+ ._free_dma = vdma_nop,
16909+ ._get_dma_residue = vdma_get_dma_residue,
16910+ ._dma_mem_alloc = vdma_mem_alloc,
16911+ ._dma_setup = vdma_dma_setup
16912 }
16913 };
16914
16915diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
16916index 72ba21a..79f3f66 100644
16917--- a/arch/x86/include/asm/fpu-internal.h
16918+++ b/arch/x86/include/asm/fpu-internal.h
16919@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16920 #define user_insn(insn, output, input...) \
16921 ({ \
16922 int err; \
16923+ pax_open_userland(); \
16924 asm volatile(ASM_STAC "\n" \
16925- "1:" #insn "\n\t" \
16926+ "1:" \
16927+ __copyuser_seg \
16928+ #insn "\n\t" \
16929 "2: " ASM_CLAC "\n" \
16930 ".section .fixup,\"ax\"\n" \
16931 "3: movl $-1,%[err]\n" \
16932@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16933 _ASM_EXTABLE(1b, 3b) \
16934 : [err] "=r" (err), output \
16935 : "0"(0), input); \
16936+ pax_close_userland(); \
16937 err; \
16938 })
16939
16940@@ -300,7 +304,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
16941 "fnclex\n\t"
16942 "emms\n\t"
16943 "fildl %P[addr]" /* set F?P to defined value */
16944- : : [addr] "m" (tsk->thread.fpu.has_fpu));
16945+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
16946 }
16947
16948 return fpu_restore_checking(&tsk->thread.fpu);
16949diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
16950index b4c1f54..e290c08 100644
16951--- a/arch/x86/include/asm/futex.h
16952+++ b/arch/x86/include/asm/futex.h
16953@@ -12,6 +12,7 @@
16954 #include <asm/smap.h>
16955
16956 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
16957+ typecheck(u32 __user *, uaddr); \
16958 asm volatile("\t" ASM_STAC "\n" \
16959 "1:\t" insn "\n" \
16960 "2:\t" ASM_CLAC "\n" \
16961@@ -20,15 +21,16 @@
16962 "\tjmp\t2b\n" \
16963 "\t.previous\n" \
16964 _ASM_EXTABLE(1b, 3b) \
16965- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
16966+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
16967 : "i" (-EFAULT), "0" (oparg), "1" (0))
16968
16969 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
16970+ typecheck(u32 __user *, uaddr); \
16971 asm volatile("\t" ASM_STAC "\n" \
16972 "1:\tmovl %2, %0\n" \
16973 "\tmovl\t%0, %3\n" \
16974 "\t" insn "\n" \
16975- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
16976+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
16977 "\tjnz\t1b\n" \
16978 "3:\t" ASM_CLAC "\n" \
16979 "\t.section .fixup,\"ax\"\n" \
16980@@ -38,7 +40,7 @@
16981 _ASM_EXTABLE(1b, 4b) \
16982 _ASM_EXTABLE(2b, 4b) \
16983 : "=&a" (oldval), "=&r" (ret), \
16984- "+m" (*uaddr), "=&r" (tem) \
16985+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
16986 : "r" (oparg), "i" (-EFAULT), "1" (0))
16987
16988 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16989@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16990
16991 pagefault_disable();
16992
16993+ pax_open_userland();
16994 switch (op) {
16995 case FUTEX_OP_SET:
16996- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
16997+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
16998 break;
16999 case FUTEX_OP_ADD:
17000- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
17001+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
17002 uaddr, oparg);
17003 break;
17004 case FUTEX_OP_OR:
17005@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17006 default:
17007 ret = -ENOSYS;
17008 }
17009+ pax_close_userland();
17010
17011 pagefault_enable();
17012
17013diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
17014index 9662290..49ca5e5 100644
17015--- a/arch/x86/include/asm/hw_irq.h
17016+++ b/arch/x86/include/asm/hw_irq.h
17017@@ -160,8 +160,8 @@ static inline void unlock_vector_lock(void) {}
17018 #endif /* CONFIG_X86_LOCAL_APIC */
17019
17020 /* Statistics */
17021-extern atomic_t irq_err_count;
17022-extern atomic_t irq_mis_count;
17023+extern atomic_unchecked_t irq_err_count;
17024+extern atomic_unchecked_t irq_mis_count;
17025
17026 /* EISA */
17027 extern void eisa_set_level_irq(unsigned int irq);
17028diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17029index ccffa53..3c90c87 100644
17030--- a/arch/x86/include/asm/i8259.h
17031+++ b/arch/x86/include/asm/i8259.h
17032@@ -62,7 +62,7 @@ struct legacy_pic {
17033 void (*init)(int auto_eoi);
17034 int (*irq_pending)(unsigned int irq);
17035 void (*make_irq)(unsigned int irq);
17036-};
17037+} __do_const;
17038
17039 extern struct legacy_pic *legacy_pic;
17040 extern struct legacy_pic null_legacy_pic;
17041diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17042index 34a5b93..27e40a6 100644
17043--- a/arch/x86/include/asm/io.h
17044+++ b/arch/x86/include/asm/io.h
17045@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17046 "m" (*(volatile type __force *)addr) barrier); }
17047
17048 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17049-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17050-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17051+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17052+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17053
17054 build_mmio_read(__readb, "b", unsigned char, "=q", )
17055-build_mmio_read(__readw, "w", unsigned short, "=r", )
17056-build_mmio_read(__readl, "l", unsigned int, "=r", )
17057+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17058+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17059
17060 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17061 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17062@@ -113,7 +113,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17063 * this function
17064 */
17065
17066-static inline phys_addr_t virt_to_phys(volatile void *address)
17067+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17068 {
17069 return __pa(address);
17070 }
17071@@ -189,7 +189,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17072 return ioremap_nocache(offset, size);
17073 }
17074
17075-extern void iounmap(volatile void __iomem *addr);
17076+extern void iounmap(const volatile void __iomem *addr);
17077
17078 extern void set_iounmap_nonlazy(void);
17079
17080@@ -199,6 +199,17 @@ extern void set_iounmap_nonlazy(void);
17081
17082 #include <linux/vmalloc.h>
17083
17084+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17085+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17086+{
17087+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17088+}
17089+
17090+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17091+{
17092+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17093+}
17094+
17095 /*
17096 * Convert a virtual cached pointer to an uncached pointer
17097 */
17098diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17099index 0a8b519..80e7d5b 100644
17100--- a/arch/x86/include/asm/irqflags.h
17101+++ b/arch/x86/include/asm/irqflags.h
17102@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17103 sti; \
17104 sysexit
17105
17106+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17107+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17108+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17109+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17110+
17111 #else
17112 #define INTERRUPT_RETURN iret
17113 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17114diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17115index 4421b5d..8543006 100644
17116--- a/arch/x86/include/asm/kprobes.h
17117+++ b/arch/x86/include/asm/kprobes.h
17118@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
17119 #define RELATIVEJUMP_SIZE 5
17120 #define RELATIVECALL_OPCODE 0xe8
17121 #define RELATIVE_ADDR_SIZE 4
17122-#define MAX_STACK_SIZE 64
17123-#define MIN_STACK_SIZE(ADDR) \
17124- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17125- THREAD_SIZE - (unsigned long)(ADDR))) \
17126- ? (MAX_STACK_SIZE) \
17127- : (((unsigned long)current_thread_info()) + \
17128- THREAD_SIZE - (unsigned long)(ADDR)))
17129+#define MAX_STACK_SIZE 64UL
17130+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17131
17132 #define flush_insn_slot(p) do { } while (0)
17133
17134diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17135index 4ad6560..75c7bdd 100644
17136--- a/arch/x86/include/asm/local.h
17137+++ b/arch/x86/include/asm/local.h
17138@@ -10,33 +10,97 @@ typedef struct {
17139 atomic_long_t a;
17140 } local_t;
17141
17142+typedef struct {
17143+ atomic_long_unchecked_t a;
17144+} local_unchecked_t;
17145+
17146 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17147
17148 #define local_read(l) atomic_long_read(&(l)->a)
17149+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17150 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17151+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17152
17153 static inline void local_inc(local_t *l)
17154 {
17155- asm volatile(_ASM_INC "%0"
17156+ asm volatile(_ASM_INC "%0\n"
17157+
17158+#ifdef CONFIG_PAX_REFCOUNT
17159+ "jno 0f\n"
17160+ _ASM_DEC "%0\n"
17161+ "int $4\n0:\n"
17162+ _ASM_EXTABLE(0b, 0b)
17163+#endif
17164+
17165+ : "+m" (l->a.counter));
17166+}
17167+
17168+static inline void local_inc_unchecked(local_unchecked_t *l)
17169+{
17170+ asm volatile(_ASM_INC "%0\n"
17171 : "+m" (l->a.counter));
17172 }
17173
17174 static inline void local_dec(local_t *l)
17175 {
17176- asm volatile(_ASM_DEC "%0"
17177+ asm volatile(_ASM_DEC "%0\n"
17178+
17179+#ifdef CONFIG_PAX_REFCOUNT
17180+ "jno 0f\n"
17181+ _ASM_INC "%0\n"
17182+ "int $4\n0:\n"
17183+ _ASM_EXTABLE(0b, 0b)
17184+#endif
17185+
17186+ : "+m" (l->a.counter));
17187+}
17188+
17189+static inline void local_dec_unchecked(local_unchecked_t *l)
17190+{
17191+ asm volatile(_ASM_DEC "%0\n"
17192 : "+m" (l->a.counter));
17193 }
17194
17195 static inline void local_add(long i, local_t *l)
17196 {
17197- asm volatile(_ASM_ADD "%1,%0"
17198+ asm volatile(_ASM_ADD "%1,%0\n"
17199+
17200+#ifdef CONFIG_PAX_REFCOUNT
17201+ "jno 0f\n"
17202+ _ASM_SUB "%1,%0\n"
17203+ "int $4\n0:\n"
17204+ _ASM_EXTABLE(0b, 0b)
17205+#endif
17206+
17207+ : "+m" (l->a.counter)
17208+ : "ir" (i));
17209+}
17210+
17211+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17212+{
17213+ asm volatile(_ASM_ADD "%1,%0\n"
17214 : "+m" (l->a.counter)
17215 : "ir" (i));
17216 }
17217
17218 static inline void local_sub(long i, local_t *l)
17219 {
17220- asm volatile(_ASM_SUB "%1,%0"
17221+ asm volatile(_ASM_SUB "%1,%0\n"
17222+
17223+#ifdef CONFIG_PAX_REFCOUNT
17224+ "jno 0f\n"
17225+ _ASM_ADD "%1,%0\n"
17226+ "int $4\n0:\n"
17227+ _ASM_EXTABLE(0b, 0b)
17228+#endif
17229+
17230+ : "+m" (l->a.counter)
17231+ : "ir" (i));
17232+}
17233+
17234+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17235+{
17236+ asm volatile(_ASM_SUB "%1,%0\n"
17237 : "+m" (l->a.counter)
17238 : "ir" (i));
17239 }
17240@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17241 */
17242 static inline int local_sub_and_test(long i, local_t *l)
17243 {
17244- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17245+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17246 }
17247
17248 /**
17249@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17250 */
17251 static inline int local_dec_and_test(local_t *l)
17252 {
17253- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17254+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17255 }
17256
17257 /**
17258@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17259 */
17260 static inline int local_inc_and_test(local_t *l)
17261 {
17262- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17263+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17264 }
17265
17266 /**
17267@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17268 */
17269 static inline int local_add_negative(long i, local_t *l)
17270 {
17271- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17272+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17273 }
17274
17275 /**
17276@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17277 static inline long local_add_return(long i, local_t *l)
17278 {
17279 long __i = i;
17280+ asm volatile(_ASM_XADD "%0, %1\n"
17281+
17282+#ifdef CONFIG_PAX_REFCOUNT
17283+ "jno 0f\n"
17284+ _ASM_MOV "%0,%1\n"
17285+ "int $4\n0:\n"
17286+ _ASM_EXTABLE(0b, 0b)
17287+#endif
17288+
17289+ : "+r" (i), "+m" (l->a.counter)
17290+ : : "memory");
17291+ return i + __i;
17292+}
17293+
17294+/**
17295+ * local_add_return_unchecked - add and return
17296+ * @i: integer value to add
17297+ * @l: pointer to type local_unchecked_t
17298+ *
17299+ * Atomically adds @i to @l and returns @i + @l
17300+ */
17301+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
17302+{
17303+ long __i = i;
17304 asm volatile(_ASM_XADD "%0, %1;"
17305 : "+r" (i), "+m" (l->a.counter)
17306 : : "memory");
17307@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
17308
17309 #define local_cmpxchg(l, o, n) \
17310 (cmpxchg_local(&((l)->a.counter), (o), (n)))
17311+#define local_cmpxchg_unchecked(l, o, n) \
17312+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
17313 /* Always has a lock prefix */
17314 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
17315
17316diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
17317new file mode 100644
17318index 0000000..2bfd3ba
17319--- /dev/null
17320+++ b/arch/x86/include/asm/mman.h
17321@@ -0,0 +1,15 @@
17322+#ifndef _X86_MMAN_H
17323+#define _X86_MMAN_H
17324+
17325+#include <uapi/asm/mman.h>
17326+
17327+#ifdef __KERNEL__
17328+#ifndef __ASSEMBLY__
17329+#ifdef CONFIG_X86_32
17330+#define arch_mmap_check i386_mmap_check
17331+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
17332+#endif
17333+#endif
17334+#endif
17335+
17336+#endif /* X86_MMAN_H */
17337diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
17338index 09b9620..923aecd 100644
17339--- a/arch/x86/include/asm/mmu.h
17340+++ b/arch/x86/include/asm/mmu.h
17341@@ -9,7 +9,7 @@
17342 * we put the segment information here.
17343 */
17344 typedef struct {
17345- void *ldt;
17346+ struct desc_struct *ldt;
17347 int size;
17348
17349 #ifdef CONFIG_X86_64
17350@@ -18,7 +18,19 @@ typedef struct {
17351 #endif
17352
17353 struct mutex lock;
17354- void __user *vdso;
17355+ unsigned long vdso;
17356+
17357+#ifdef CONFIG_X86_32
17358+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17359+ unsigned long user_cs_base;
17360+ unsigned long user_cs_limit;
17361+
17362+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17363+ cpumask_t cpu_user_cs_mask;
17364+#endif
17365+
17366+#endif
17367+#endif
17368
17369 atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */
17370 } mm_context_t;
17371diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17372index 883f6b93..bb405b5 100644
17373--- a/arch/x86/include/asm/mmu_context.h
17374+++ b/arch/x86/include/asm/mmu_context.h
17375@@ -42,6 +42,20 @@ void destroy_context(struct mm_struct *mm);
17376
17377 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17378 {
17379+
17380+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17381+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
17382+ unsigned int i;
17383+ pgd_t *pgd;
17384+
17385+ pax_open_kernel();
17386+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
17387+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17388+ set_pgd_batched(pgd+i, native_make_pgd(0));
17389+ pax_close_kernel();
17390+ }
17391+#endif
17392+
17393 #ifdef CONFIG_SMP
17394 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17395 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17396@@ -52,16 +66,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17397 struct task_struct *tsk)
17398 {
17399 unsigned cpu = smp_processor_id();
17400+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17401+ int tlbstate = TLBSTATE_OK;
17402+#endif
17403
17404 if (likely(prev != next)) {
17405 #ifdef CONFIG_SMP
17406+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17407+ tlbstate = this_cpu_read(cpu_tlbstate.state);
17408+#endif
17409 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17410 this_cpu_write(cpu_tlbstate.active_mm, next);
17411 #endif
17412 cpumask_set_cpu(cpu, mm_cpumask(next));
17413
17414 /* Re-load page tables */
17415+#ifdef CONFIG_PAX_PER_CPU_PGD
17416+ pax_open_kernel();
17417+
17418+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17419+ if (static_cpu_has(X86_FEATURE_PCID))
17420+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17421+ else
17422+#endif
17423+
17424+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17425+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17426+ pax_close_kernel();
17427+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17428+
17429+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17430+ if (static_cpu_has(X86_FEATURE_PCID)) {
17431+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17432+ u64 descriptor[2];
17433+ descriptor[0] = PCID_USER;
17434+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17435+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17436+ descriptor[0] = PCID_KERNEL;
17437+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17438+ }
17439+ } else {
17440+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17441+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17442+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17443+ else
17444+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17445+ }
17446+ } else
17447+#endif
17448+
17449+ load_cr3(get_cpu_pgd(cpu, kernel));
17450+#else
17451 load_cr3(next->pgd);
17452+#endif
17453 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17454
17455 /* Stop flush ipis for the previous mm */
17456@@ -84,9 +141,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17457 */
17458 if (unlikely(prev->context.ldt != next->context.ldt))
17459 load_LDT_nolock(&next->context);
17460+
17461+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17462+ if (!(__supported_pte_mask & _PAGE_NX)) {
17463+ smp_mb__before_atomic();
17464+ cpumask_clear_cpu(cpu, &prev->context.cpu_user_cs_mask);
17465+ smp_mb__after_atomic();
17466+ cpumask_set_cpu(cpu, &next->context.cpu_user_cs_mask);
17467+ }
17468+#endif
17469+
17470+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17471+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17472+ prev->context.user_cs_limit != next->context.user_cs_limit))
17473+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17474+#ifdef CONFIG_SMP
17475+ else if (unlikely(tlbstate != TLBSTATE_OK))
17476+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17477+#endif
17478+#endif
17479+
17480 }
17481+ else {
17482+
17483+#ifdef CONFIG_PAX_PER_CPU_PGD
17484+ pax_open_kernel();
17485+
17486+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17487+ if (static_cpu_has(X86_FEATURE_PCID))
17488+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17489+ else
17490+#endif
17491+
17492+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17493+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17494+ pax_close_kernel();
17495+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17496+
17497+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17498+ if (static_cpu_has(X86_FEATURE_PCID)) {
17499+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17500+ u64 descriptor[2];
17501+ descriptor[0] = PCID_USER;
17502+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17503+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17504+ descriptor[0] = PCID_KERNEL;
17505+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17506+ }
17507+ } else {
17508+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17509+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17510+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17511+ else
17512+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17513+ }
17514+ } else
17515+#endif
17516+
17517+ load_cr3(get_cpu_pgd(cpu, kernel));
17518+#endif
17519+
17520 #ifdef CONFIG_SMP
17521- else {
17522 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17523 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17524
17525@@ -103,13 +218,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17526 * tlb flush IPI delivery. We must reload CR3
17527 * to make sure to use no freed page tables.
17528 */
17529+
17530+#ifndef CONFIG_PAX_PER_CPU_PGD
17531 load_cr3(next->pgd);
17532 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17533+#endif
17534+
17535 load_mm_cr4(next);
17536 load_LDT_nolock(&next->context);
17537+
17538+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17539+ if (!(__supported_pte_mask & _PAGE_NX))
17540+ cpumask_set_cpu(cpu, &next->context.cpu_user_cs_mask);
17541+#endif
17542+
17543+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17544+#ifdef CONFIG_PAX_PAGEEXEC
17545+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17546+#endif
17547+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17548+#endif
17549+
17550 }
17551+#endif
17552 }
17553-#endif
17554 }
17555
17556 #define activate_mm(prev, next) \
17557diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17558index e3b7819..b257c64 100644
17559--- a/arch/x86/include/asm/module.h
17560+++ b/arch/x86/include/asm/module.h
17561@@ -5,6 +5,7 @@
17562
17563 #ifdef CONFIG_X86_64
17564 /* X86_64 does not define MODULE_PROC_FAMILY */
17565+#define MODULE_PROC_FAMILY ""
17566 #elif defined CONFIG_M486
17567 #define MODULE_PROC_FAMILY "486 "
17568 #elif defined CONFIG_M586
17569@@ -57,8 +58,20 @@
17570 #error unknown processor family
17571 #endif
17572
17573-#ifdef CONFIG_X86_32
17574-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17575+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17576+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17577+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17578+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17579+#else
17580+#define MODULE_PAX_KERNEXEC ""
17581 #endif
17582
17583+#ifdef CONFIG_PAX_MEMORY_UDEREF
17584+#define MODULE_PAX_UDEREF "UDEREF "
17585+#else
17586+#define MODULE_PAX_UDEREF ""
17587+#endif
17588+
17589+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17590+
17591 #endif /* _ASM_X86_MODULE_H */
17592diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17593index 5f2fc44..106caa6 100644
17594--- a/arch/x86/include/asm/nmi.h
17595+++ b/arch/x86/include/asm/nmi.h
17596@@ -36,26 +36,35 @@ enum {
17597
17598 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
17599
17600+struct nmiaction;
17601+
17602+struct nmiwork {
17603+ const struct nmiaction *action;
17604+ u64 max_duration;
17605+ struct irq_work irq_work;
17606+};
17607+
17608 struct nmiaction {
17609 struct list_head list;
17610 nmi_handler_t handler;
17611- u64 max_duration;
17612- struct irq_work irq_work;
17613 unsigned long flags;
17614 const char *name;
17615-};
17616+ struct nmiwork *work;
17617+} __do_const;
17618
17619 #define register_nmi_handler(t, fn, fg, n, init...) \
17620 ({ \
17621- static struct nmiaction init fn##_na = { \
17622+ static struct nmiwork fn##_nw; \
17623+ static const struct nmiaction init fn##_na = { \
17624 .handler = (fn), \
17625 .name = (n), \
17626 .flags = (fg), \
17627+ .work = &fn##_nw, \
17628 }; \
17629 __register_nmi_handler((t), &fn##_na); \
17630 })
17631
17632-int __register_nmi_handler(unsigned int, struct nmiaction *);
17633+int __register_nmi_handler(unsigned int, const struct nmiaction *);
17634
17635 void unregister_nmi_handler(unsigned int, const char *);
17636
17637diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17638index 802dde3..9183e68 100644
17639--- a/arch/x86/include/asm/page.h
17640+++ b/arch/x86/include/asm/page.h
17641@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17642 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17643
17644 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17645+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17646
17647 #define __boot_va(x) __va(x)
17648 #define __boot_pa(x) __pa(x)
17649@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17650 * virt_to_page(kaddr) returns a valid pointer if and only if
17651 * virt_addr_valid(kaddr) returns true.
17652 */
17653-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17654 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
17655 extern bool __virt_addr_valid(unsigned long kaddr);
17656 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
17657
17658+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
17659+#define virt_to_page(kaddr) \
17660+ ({ \
17661+ const void *__kaddr = (const void *)(kaddr); \
17662+ BUG_ON(!virt_addr_valid(__kaddr)); \
17663+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
17664+ })
17665+#else
17666+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17667+#endif
17668+
17669 #endif /* __ASSEMBLY__ */
17670
17671 #include <asm-generic/memory_model.h>
17672diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17673index b3bebf9..13ac22e 100644
17674--- a/arch/x86/include/asm/page_64.h
17675+++ b/arch/x86/include/asm/page_64.h
17676@@ -7,9 +7,9 @@
17677
17678 /* duplicated to the one in bootmem.h */
17679 extern unsigned long max_pfn;
17680-extern unsigned long phys_base;
17681+extern const unsigned long phys_base;
17682
17683-static inline unsigned long __phys_addr_nodebug(unsigned long x)
17684+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17685 {
17686 unsigned long y = x - __START_KERNEL_map;
17687
17688@@ -20,8 +20,8 @@ static inline unsigned long __phys_addr_nodebug(unsigned long x)
17689 }
17690
17691 #ifdef CONFIG_DEBUG_VIRTUAL
17692-extern unsigned long __phys_addr(unsigned long);
17693-extern unsigned long __phys_addr_symbol(unsigned long);
17694+extern unsigned long __intentional_overflow(-1) __phys_addr(unsigned long);
17695+extern unsigned long __intentional_overflow(-1) __phys_addr_symbol(unsigned long);
17696 #else
17697 #define __phys_addr(x) __phys_addr_nodebug(x)
17698 #define __phys_addr_symbol(x) \
17699diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17700index 965c47d..ffe0af8 100644
17701--- a/arch/x86/include/asm/paravirt.h
17702+++ b/arch/x86/include/asm/paravirt.h
17703@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17704 return (pmd_t) { ret };
17705 }
17706
17707-static inline pmdval_t pmd_val(pmd_t pmd)
17708+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17709 {
17710 pmdval_t ret;
17711
17712@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17713 val);
17714 }
17715
17716+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17717+{
17718+ pgdval_t val = native_pgd_val(pgd);
17719+
17720+ if (sizeof(pgdval_t) > sizeof(long))
17721+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17722+ val, (u64)val >> 32);
17723+ else
17724+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17725+ val);
17726+}
17727+
17728 static inline void pgd_clear(pgd_t *pgdp)
17729 {
17730 set_pgd(pgdp, __pgd(0));
17731@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17732 pv_mmu_ops.set_fixmap(idx, phys, flags);
17733 }
17734
17735+#ifdef CONFIG_PAX_KERNEXEC
17736+static inline unsigned long pax_open_kernel(void)
17737+{
17738+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17739+}
17740+
17741+static inline unsigned long pax_close_kernel(void)
17742+{
17743+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17744+}
17745+#else
17746+static inline unsigned long pax_open_kernel(void) { return 0; }
17747+static inline unsigned long pax_close_kernel(void) { return 0; }
17748+#endif
17749+
17750 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17751
17752 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17753@@ -906,7 +933,7 @@ extern void default_banner(void);
17754
17755 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17756 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17757-#define PARA_INDIRECT(addr) *%cs:addr
17758+#define PARA_INDIRECT(addr) *%ss:addr
17759 #endif
17760
17761 #define INTERRUPT_RETURN \
17762@@ -981,6 +1008,21 @@ extern void default_banner(void);
17763 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
17764 CLBR_NONE, \
17765 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
17766+
17767+#define GET_CR0_INTO_RDI \
17768+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17769+ mov %rax,%rdi
17770+
17771+#define SET_RDI_INTO_CR0 \
17772+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17773+
17774+#define GET_CR3_INTO_RDI \
17775+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
17776+ mov %rax,%rdi
17777+
17778+#define SET_RDI_INTO_CR3 \
17779+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
17780+
17781 #endif /* CONFIG_X86_32 */
17782
17783 #endif /* __ASSEMBLY__ */
17784diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
17785index 7549b8b..f0edfda 100644
17786--- a/arch/x86/include/asm/paravirt_types.h
17787+++ b/arch/x86/include/asm/paravirt_types.h
17788@@ -84,7 +84,7 @@ struct pv_init_ops {
17789 */
17790 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
17791 unsigned long addr, unsigned len);
17792-};
17793+} __no_const __no_randomize_layout;
17794
17795
17796 struct pv_lazy_ops {
17797@@ -92,13 +92,13 @@ struct pv_lazy_ops {
17798 void (*enter)(void);
17799 void (*leave)(void);
17800 void (*flush)(void);
17801-};
17802+} __no_randomize_layout;
17803
17804 struct pv_time_ops {
17805 unsigned long long (*sched_clock)(void);
17806 unsigned long long (*steal_clock)(int cpu);
17807 unsigned long (*get_tsc_khz)(void);
17808-};
17809+} __no_const __no_randomize_layout;
17810
17811 struct pv_cpu_ops {
17812 /* hooks for various privileged instructions */
17813@@ -192,7 +192,7 @@ struct pv_cpu_ops {
17814
17815 void (*start_context_switch)(struct task_struct *prev);
17816 void (*end_context_switch)(struct task_struct *next);
17817-};
17818+} __no_const __no_randomize_layout;
17819
17820 struct pv_irq_ops {
17821 /*
17822@@ -215,7 +215,7 @@ struct pv_irq_ops {
17823 #ifdef CONFIG_X86_64
17824 void (*adjust_exception_frame)(void);
17825 #endif
17826-};
17827+} __no_randomize_layout;
17828
17829 struct pv_apic_ops {
17830 #ifdef CONFIG_X86_LOCAL_APIC
17831@@ -223,7 +223,7 @@ struct pv_apic_ops {
17832 unsigned long start_eip,
17833 unsigned long start_esp);
17834 #endif
17835-};
17836+} __no_const __no_randomize_layout;
17837
17838 struct pv_mmu_ops {
17839 unsigned long (*read_cr2)(void);
17840@@ -313,6 +313,7 @@ struct pv_mmu_ops {
17841 struct paravirt_callee_save make_pud;
17842
17843 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
17844+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
17845 #endif /* PAGETABLE_LEVELS == 4 */
17846 #endif /* PAGETABLE_LEVELS >= 3 */
17847
17848@@ -324,7 +325,13 @@ struct pv_mmu_ops {
17849 an mfn. We can tell which is which from the index. */
17850 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
17851 phys_addr_t phys, pgprot_t flags);
17852-};
17853+
17854+#ifdef CONFIG_PAX_KERNEXEC
17855+ unsigned long (*pax_open_kernel)(void);
17856+ unsigned long (*pax_close_kernel)(void);
17857+#endif
17858+
17859+} __no_randomize_layout;
17860
17861 struct arch_spinlock;
17862 #ifdef CONFIG_SMP
17863@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
17864 struct pv_lock_ops {
17865 struct paravirt_callee_save lock_spinning;
17866 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
17867-};
17868+} __no_randomize_layout;
17869
17870 /* This contains all the paravirt structures: we get a convenient
17871 * number for each function using the offset which we use to indicate
17872- * what to patch. */
17873+ * what to patch.
17874+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
17875+ */
17876+
17877 struct paravirt_patch_template {
17878 struct pv_init_ops pv_init_ops;
17879 struct pv_time_ops pv_time_ops;
17880@@ -349,7 +359,7 @@ struct paravirt_patch_template {
17881 struct pv_apic_ops pv_apic_ops;
17882 struct pv_mmu_ops pv_mmu_ops;
17883 struct pv_lock_ops pv_lock_ops;
17884-};
17885+} __no_randomize_layout;
17886
17887 extern struct pv_info pv_info;
17888 extern struct pv_init_ops pv_init_ops;
17889diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
17890index c4412e9..90e88c5 100644
17891--- a/arch/x86/include/asm/pgalloc.h
17892+++ b/arch/x86/include/asm/pgalloc.h
17893@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
17894 pmd_t *pmd, pte_t *pte)
17895 {
17896 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17897+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
17898+}
17899+
17900+static inline void pmd_populate_user(struct mm_struct *mm,
17901+ pmd_t *pmd, pte_t *pte)
17902+{
17903+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17904 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
17905 }
17906
17907@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
17908
17909 #ifdef CONFIG_X86_PAE
17910 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
17911+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
17912+{
17913+ pud_populate(mm, pudp, pmd);
17914+}
17915 #else /* !CONFIG_X86_PAE */
17916 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17917 {
17918 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17919 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
17920 }
17921+
17922+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17923+{
17924+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17925+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
17926+}
17927 #endif /* CONFIG_X86_PAE */
17928
17929 #if PAGETABLE_LEVELS > 3
17930@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17931 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
17932 }
17933
17934+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
17935+{
17936+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
17937+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
17938+}
17939+
17940 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
17941 {
17942 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
17943diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
17944index fd74a11..35fd5af 100644
17945--- a/arch/x86/include/asm/pgtable-2level.h
17946+++ b/arch/x86/include/asm/pgtable-2level.h
17947@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
17948
17949 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17950 {
17951+ pax_open_kernel();
17952 *pmdp = pmd;
17953+ pax_close_kernel();
17954 }
17955
17956 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17957diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
17958index cdaa58c..e61122b 100644
17959--- a/arch/x86/include/asm/pgtable-3level.h
17960+++ b/arch/x86/include/asm/pgtable-3level.h
17961@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17962
17963 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17964 {
17965+ pax_open_kernel();
17966 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
17967+ pax_close_kernel();
17968 }
17969
17970 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17971 {
17972+ pax_open_kernel();
17973 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
17974+ pax_close_kernel();
17975 }
17976
17977 /*
17978diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
17979index a0c35bf..7045c6a 100644
17980--- a/arch/x86/include/asm/pgtable.h
17981+++ b/arch/x86/include/asm/pgtable.h
17982@@ -47,6 +47,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17983
17984 #ifndef __PAGETABLE_PUD_FOLDED
17985 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
17986+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
17987 #define pgd_clear(pgd) native_pgd_clear(pgd)
17988 #endif
17989
17990@@ -84,12 +85,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17991
17992 #define arch_end_context_switch(prev) do {} while(0)
17993
17994+#define pax_open_kernel() native_pax_open_kernel()
17995+#define pax_close_kernel() native_pax_close_kernel()
17996 #endif /* CONFIG_PARAVIRT */
17997
17998+#define __HAVE_ARCH_PAX_OPEN_KERNEL
17999+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
18000+
18001+#ifdef CONFIG_PAX_KERNEXEC
18002+static inline unsigned long native_pax_open_kernel(void)
18003+{
18004+ unsigned long cr0;
18005+
18006+ preempt_disable();
18007+ barrier();
18008+ cr0 = read_cr0() ^ X86_CR0_WP;
18009+ BUG_ON(cr0 & X86_CR0_WP);
18010+ write_cr0(cr0);
18011+ barrier();
18012+ return cr0 ^ X86_CR0_WP;
18013+}
18014+
18015+static inline unsigned long native_pax_close_kernel(void)
18016+{
18017+ unsigned long cr0;
18018+
18019+ barrier();
18020+ cr0 = read_cr0() ^ X86_CR0_WP;
18021+ BUG_ON(!(cr0 & X86_CR0_WP));
18022+ write_cr0(cr0);
18023+ barrier();
18024+ preempt_enable_no_resched();
18025+ return cr0 ^ X86_CR0_WP;
18026+}
18027+#else
18028+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18029+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18030+#endif
18031+
18032 /*
18033 * The following only work if pte_present() is true.
18034 * Undefined behaviour if not..
18035 */
18036+static inline int pte_user(pte_t pte)
18037+{
18038+ return pte_val(pte) & _PAGE_USER;
18039+}
18040+
18041 static inline int pte_dirty(pte_t pte)
18042 {
18043 return pte_flags(pte) & _PAGE_DIRTY;
18044@@ -150,6 +192,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18045 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18046 }
18047
18048+static inline unsigned long pgd_pfn(pgd_t pgd)
18049+{
18050+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18051+}
18052+
18053 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18054
18055 static inline int pmd_large(pmd_t pte)
18056@@ -203,9 +250,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18057 return pte_clear_flags(pte, _PAGE_RW);
18058 }
18059
18060+static inline pte_t pte_mkread(pte_t pte)
18061+{
18062+ return __pte(pte_val(pte) | _PAGE_USER);
18063+}
18064+
18065 static inline pte_t pte_mkexec(pte_t pte)
18066 {
18067- return pte_clear_flags(pte, _PAGE_NX);
18068+#ifdef CONFIG_X86_PAE
18069+ if (__supported_pte_mask & _PAGE_NX)
18070+ return pte_clear_flags(pte, _PAGE_NX);
18071+ else
18072+#endif
18073+ return pte_set_flags(pte, _PAGE_USER);
18074+}
18075+
18076+static inline pte_t pte_exprotect(pte_t pte)
18077+{
18078+#ifdef CONFIG_X86_PAE
18079+ if (__supported_pte_mask & _PAGE_NX)
18080+ return pte_set_flags(pte, _PAGE_NX);
18081+ else
18082+#endif
18083+ return pte_clear_flags(pte, _PAGE_USER);
18084 }
18085
18086 static inline pte_t pte_mkdirty(pte_t pte)
18087@@ -420,6 +487,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18088 #endif
18089
18090 #ifndef __ASSEMBLY__
18091+
18092+#ifdef CONFIG_PAX_PER_CPU_PGD
18093+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18094+enum cpu_pgd_type {kernel = 0, user = 1};
18095+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18096+{
18097+ return cpu_pgd[cpu][type];
18098+}
18099+#endif
18100+
18101 #include <linux/mm_types.h>
18102 #include <linux/mmdebug.h>
18103 #include <linux/log2.h>
18104@@ -571,7 +648,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18105 * Currently stuck as a macro due to indirect forward reference to
18106 * linux/mmzone.h's __section_mem_map_addr() definition:
18107 */
18108-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18109+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18110
18111 /* Find an entry in the second-level page table.. */
18112 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18113@@ -611,7 +688,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18114 * Currently stuck as a macro due to indirect forward reference to
18115 * linux/mmzone.h's __section_mem_map_addr() definition:
18116 */
18117-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18118+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18119
18120 /* to find an entry in a page-table-directory. */
18121 static inline unsigned long pud_index(unsigned long address)
18122@@ -626,7 +703,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18123
18124 static inline int pgd_bad(pgd_t pgd)
18125 {
18126- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18127+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18128 }
18129
18130 static inline int pgd_none(pgd_t pgd)
18131@@ -649,7 +726,12 @@ static inline int pgd_none(pgd_t pgd)
18132 * pgd_offset() returns a (pgd_t *)
18133 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18134 */
18135-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18136+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18137+
18138+#ifdef CONFIG_PAX_PER_CPU_PGD
18139+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18140+#endif
18141+
18142 /*
18143 * a shortcut which implies the use of the kernel's pgd, instead
18144 * of a process's
18145@@ -660,6 +742,25 @@ static inline int pgd_none(pgd_t pgd)
18146 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18147 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18148
18149+#ifdef CONFIG_X86_32
18150+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18151+#else
18152+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18153+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18154+
18155+#ifdef CONFIG_PAX_MEMORY_UDEREF
18156+#ifdef __ASSEMBLY__
18157+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18158+#else
18159+extern unsigned long pax_user_shadow_base;
18160+extern pgdval_t clone_pgd_mask;
18161+#endif
18162+#else
18163+#define pax_user_shadow_base (0UL)
18164+#endif
18165+
18166+#endif
18167+
18168 #ifndef __ASSEMBLY__
18169
18170 extern int direct_gbpages;
18171@@ -826,11 +927,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18172 * dst and src can be on the same page, but the range must not overlap,
18173 * and must not cross a page boundary.
18174 */
18175-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18176+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18177 {
18178- memcpy(dst, src, count * sizeof(pgd_t));
18179+ pax_open_kernel();
18180+ while (count--)
18181+ *dst++ = *src++;
18182+ pax_close_kernel();
18183 }
18184
18185+#ifdef CONFIG_PAX_PER_CPU_PGD
18186+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18187+#endif
18188+
18189+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18190+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18191+#else
18192+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18193+#endif
18194+
18195 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18196 static inline int page_level_shift(enum pg_level level)
18197 {
18198diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18199index b6c0b40..3535d47 100644
18200--- a/arch/x86/include/asm/pgtable_32.h
18201+++ b/arch/x86/include/asm/pgtable_32.h
18202@@ -25,9 +25,6 @@
18203 struct mm_struct;
18204 struct vm_area_struct;
18205
18206-extern pgd_t swapper_pg_dir[1024];
18207-extern pgd_t initial_page_table[1024];
18208-
18209 static inline void pgtable_cache_init(void) { }
18210 static inline void check_pgt_cache(void) { }
18211 void paging_init(void);
18212@@ -45,6 +42,12 @@ void paging_init(void);
18213 # include <asm/pgtable-2level.h>
18214 #endif
18215
18216+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18217+extern pgd_t initial_page_table[PTRS_PER_PGD];
18218+#ifdef CONFIG_X86_PAE
18219+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18220+#endif
18221+
18222 #if defined(CONFIG_HIGHPTE)
18223 #define pte_offset_map(dir, address) \
18224 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18225@@ -59,12 +62,17 @@ void paging_init(void);
18226 /* Clear a kernel PTE and flush it from the TLB */
18227 #define kpte_clear_flush(ptep, vaddr) \
18228 do { \
18229+ pax_open_kernel(); \
18230 pte_clear(&init_mm, (vaddr), (ptep)); \
18231+ pax_close_kernel(); \
18232 __flush_tlb_one((vaddr)); \
18233 } while (0)
18234
18235 #endif /* !__ASSEMBLY__ */
18236
18237+#define HAVE_ARCH_UNMAPPED_AREA
18238+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18239+
18240 /*
18241 * kern_addr_valid() is (1) for FLATMEM and (0) for
18242 * SPARSEMEM and DISCONTIGMEM
18243diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18244index 9fb2f2b..b04b4bf 100644
18245--- a/arch/x86/include/asm/pgtable_32_types.h
18246+++ b/arch/x86/include/asm/pgtable_32_types.h
18247@@ -8,7 +8,7 @@
18248 */
18249 #ifdef CONFIG_X86_PAE
18250 # include <asm/pgtable-3level_types.h>
18251-# define PMD_SIZE (1UL << PMD_SHIFT)
18252+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18253 # define PMD_MASK (~(PMD_SIZE - 1))
18254 #else
18255 # include <asm/pgtable-2level_types.h>
18256@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18257 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18258 #endif
18259
18260+#ifdef CONFIG_PAX_KERNEXEC
18261+#ifndef __ASSEMBLY__
18262+extern unsigned char MODULES_EXEC_VADDR[];
18263+extern unsigned char MODULES_EXEC_END[];
18264+#endif
18265+#include <asm/boot.h>
18266+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18267+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18268+#else
18269+#define ktla_ktva(addr) (addr)
18270+#define ktva_ktla(addr) (addr)
18271+#endif
18272+
18273 #define MODULES_VADDR VMALLOC_START
18274 #define MODULES_END VMALLOC_END
18275 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
18276diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18277index 2ee7811..55aca24 100644
18278--- a/arch/x86/include/asm/pgtable_64.h
18279+++ b/arch/x86/include/asm/pgtable_64.h
18280@@ -16,11 +16,16 @@
18281
18282 extern pud_t level3_kernel_pgt[512];
18283 extern pud_t level3_ident_pgt[512];
18284+extern pud_t level3_vmalloc_start_pgt[512];
18285+extern pud_t level3_vmalloc_end_pgt[512];
18286+extern pud_t level3_vmemmap_pgt[512];
18287+extern pud_t level2_vmemmap_pgt[512];
18288 extern pmd_t level2_kernel_pgt[512];
18289 extern pmd_t level2_fixmap_pgt[512];
18290-extern pmd_t level2_ident_pgt[512];
18291-extern pte_t level1_fixmap_pgt[512];
18292-extern pgd_t init_level4_pgt[];
18293+extern pmd_t level2_ident_pgt[2][512];
18294+extern pte_t level1_fixmap_pgt[3][512];
18295+extern pte_t level1_vsyscall_pgt[512];
18296+extern pgd_t init_level4_pgt[512];
18297
18298 #define swapper_pg_dir init_level4_pgt
18299
18300@@ -62,7 +67,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18301
18302 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18303 {
18304+ pax_open_kernel();
18305 *pmdp = pmd;
18306+ pax_close_kernel();
18307 }
18308
18309 static inline void native_pmd_clear(pmd_t *pmd)
18310@@ -98,7 +105,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
18311
18312 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18313 {
18314+ pax_open_kernel();
18315 *pudp = pud;
18316+ pax_close_kernel();
18317 }
18318
18319 static inline void native_pud_clear(pud_t *pud)
18320@@ -108,6 +117,13 @@ static inline void native_pud_clear(pud_t *pud)
18321
18322 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
18323 {
18324+ pax_open_kernel();
18325+ *pgdp = pgd;
18326+ pax_close_kernel();
18327+}
18328+
18329+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18330+{
18331 *pgdp = pgd;
18332 }
18333
18334diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
18335index 602b602..acb53ed 100644
18336--- a/arch/x86/include/asm/pgtable_64_types.h
18337+++ b/arch/x86/include/asm/pgtable_64_types.h
18338@@ -61,11 +61,16 @@ typedef struct { pteval_t pte; } pte_t;
18339 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
18340 #define MODULES_END _AC(0xffffffffff000000, UL)
18341 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
18342+#define MODULES_EXEC_VADDR MODULES_VADDR
18343+#define MODULES_EXEC_END MODULES_END
18344 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
18345 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
18346 #define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
18347 #define EFI_VA_END (-68 * (_AC(1, UL) << 30))
18348
18349+#define ktla_ktva(addr) (addr)
18350+#define ktva_ktla(addr) (addr)
18351+
18352 #define EARLY_DYNAMIC_PAGE_TABLES 64
18353
18354 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
18355diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
18356index 8c7c108..1c1b77f 100644
18357--- a/arch/x86/include/asm/pgtable_types.h
18358+++ b/arch/x86/include/asm/pgtable_types.h
18359@@ -85,8 +85,10 @@
18360
18361 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18362 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
18363-#else
18364+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
18365 #define _PAGE_NX (_AT(pteval_t, 0))
18366+#else
18367+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
18368 #endif
18369
18370 #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
18371@@ -141,6 +143,9 @@ enum page_cache_mode {
18372 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
18373 _PAGE_ACCESSED)
18374
18375+#define PAGE_READONLY_NOEXEC PAGE_READONLY
18376+#define PAGE_SHARED_NOEXEC PAGE_SHARED
18377+
18378 #define __PAGE_KERNEL_EXEC \
18379 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
18380 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
18381@@ -148,7 +153,7 @@ enum page_cache_mode {
18382 #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
18383 #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
18384 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE)
18385-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
18386+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
18387 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
18388 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
18389 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
18390@@ -194,7 +199,7 @@ enum page_cache_mode {
18391 #ifdef CONFIG_X86_64
18392 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
18393 #else
18394-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
18395+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18396 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18397 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
18398 #endif
18399@@ -233,7 +238,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
18400 {
18401 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
18402 }
18403+#endif
18404
18405+#if PAGETABLE_LEVELS == 3
18406+#include <asm-generic/pgtable-nopud.h>
18407+#endif
18408+
18409+#if PAGETABLE_LEVELS == 2
18410+#include <asm-generic/pgtable-nopmd.h>
18411+#endif
18412+
18413+#ifndef __ASSEMBLY__
18414 #if PAGETABLE_LEVELS > 3
18415 typedef struct { pudval_t pud; } pud_t;
18416
18417@@ -247,8 +262,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18418 return pud.pud;
18419 }
18420 #else
18421-#include <asm-generic/pgtable-nopud.h>
18422-
18423 static inline pudval_t native_pud_val(pud_t pud)
18424 {
18425 return native_pgd_val(pud.pgd);
18426@@ -268,8 +281,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18427 return pmd.pmd;
18428 }
18429 #else
18430-#include <asm-generic/pgtable-nopmd.h>
18431-
18432 static inline pmdval_t native_pmd_val(pmd_t pmd)
18433 {
18434 return native_pgd_val(pmd.pud.pgd);
18435@@ -362,7 +373,6 @@ typedef struct page *pgtable_t;
18436
18437 extern pteval_t __supported_pte_mask;
18438 extern void set_nx(void);
18439-extern int nx_enabled;
18440
18441 #define pgprot_writecombine pgprot_writecombine
18442 extern pgprot_t pgprot_writecombine(pgprot_t prot);
18443diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18444index 8f327184..368fb29 100644
18445--- a/arch/x86/include/asm/preempt.h
18446+++ b/arch/x86/include/asm/preempt.h
18447@@ -84,7 +84,7 @@ static __always_inline void __preempt_count_sub(int val)
18448 */
18449 static __always_inline bool __preempt_count_dec_and_test(void)
18450 {
18451- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18452+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18453 }
18454
18455 /*
18456diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18457index ec1c935..5cc6023 100644
18458--- a/arch/x86/include/asm/processor.h
18459+++ b/arch/x86/include/asm/processor.h
18460@@ -127,7 +127,7 @@ struct cpuinfo_x86 {
18461 /* Index into per_cpu list: */
18462 u16 cpu_index;
18463 u32 microcode;
18464-};
18465+} __randomize_layout;
18466
18467 #define X86_VENDOR_INTEL 0
18468 #define X86_VENDOR_CYRIX 1
18469@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18470 : "memory");
18471 }
18472
18473+/* invpcid (%rdx),%rax */
18474+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18475+
18476+#define INVPCID_SINGLE_ADDRESS 0UL
18477+#define INVPCID_SINGLE_CONTEXT 1UL
18478+#define INVPCID_ALL_GLOBAL 2UL
18479+#define INVPCID_ALL_NONGLOBAL 3UL
18480+
18481+#define PCID_KERNEL 0UL
18482+#define PCID_USER 1UL
18483+#define PCID_NOFLUSH (1UL << 63)
18484+
18485 static inline void load_cr3(pgd_t *pgdir)
18486 {
18487- write_cr3(__pa(pgdir));
18488+ write_cr3(__pa(pgdir) | PCID_KERNEL);
18489 }
18490
18491 #ifdef CONFIG_X86_32
18492@@ -282,7 +294,7 @@ struct tss_struct {
18493
18494 } ____cacheline_aligned;
18495
18496-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
18497+extern struct tss_struct init_tss[NR_CPUS];
18498
18499 /*
18500 * Save the original ist values for checking stack pointers during debugging
18501@@ -479,6 +491,7 @@ struct thread_struct {
18502 unsigned short ds;
18503 unsigned short fsindex;
18504 unsigned short gsindex;
18505+ unsigned short ss;
18506 #endif
18507 #ifdef CONFIG_X86_32
18508 unsigned long ip;
18509@@ -805,11 +818,18 @@ static inline void spin_lock_prefetch(const void *x)
18510 */
18511 #define TASK_SIZE PAGE_OFFSET
18512 #define TASK_SIZE_MAX TASK_SIZE
18513+
18514+#ifdef CONFIG_PAX_SEGMEXEC
18515+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18516+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18517+#else
18518 #define STACK_TOP TASK_SIZE
18519-#define STACK_TOP_MAX STACK_TOP
18520+#endif
18521+
18522+#define STACK_TOP_MAX TASK_SIZE
18523
18524 #define INIT_THREAD { \
18525- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18526+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18527 .vm86_info = NULL, \
18528 .sysenter_cs = __KERNEL_CS, \
18529 .io_bitmap_ptr = NULL, \
18530@@ -823,7 +843,7 @@ static inline void spin_lock_prefetch(const void *x)
18531 */
18532 #define INIT_TSS { \
18533 .x86_tss = { \
18534- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18535+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18536 .ss0 = __KERNEL_DS, \
18537 .ss1 = __KERNEL_CS, \
18538 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
18539@@ -834,11 +854,7 @@ static inline void spin_lock_prefetch(const void *x)
18540 extern unsigned long thread_saved_pc(struct task_struct *tsk);
18541
18542 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
18543-#define KSTK_TOP(info) \
18544-({ \
18545- unsigned long *__ptr = (unsigned long *)(info); \
18546- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
18547-})
18548+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
18549
18550 /*
18551 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
18552@@ -853,7 +869,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18553 #define task_pt_regs(task) \
18554 ({ \
18555 struct pt_regs *__regs__; \
18556- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
18557+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
18558 __regs__ - 1; \
18559 })
18560
18561@@ -869,13 +885,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18562 * particular problem by preventing anything from being mapped
18563 * at the maximum canonical address.
18564 */
18565-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18566+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18567
18568 /* This decides where the kernel will search for a free chunk of vm
18569 * space during mmap's.
18570 */
18571 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18572- 0xc0000000 : 0xFFFFe000)
18573+ 0xc0000000 : 0xFFFFf000)
18574
18575 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18576 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18577@@ -886,11 +902,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18578 #define STACK_TOP_MAX TASK_SIZE_MAX
18579
18580 #define INIT_THREAD { \
18581- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18582+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18583 }
18584
18585 #define INIT_TSS { \
18586- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18587+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18588 }
18589
18590 /*
18591@@ -918,6 +934,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18592 */
18593 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18594
18595+#ifdef CONFIG_PAX_SEGMEXEC
18596+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18597+#endif
18598+
18599 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18600
18601 /* Get/set a process' ability to use the timestamp counter instruction */
18602@@ -962,7 +982,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18603 return 0;
18604 }
18605
18606-extern unsigned long arch_align_stack(unsigned long sp);
18607+#define arch_align_stack(x) ((x) & ~0xfUL)
18608 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18609
18610 void default_idle(void);
18611@@ -972,6 +992,6 @@ bool xen_set_default_idle(void);
18612 #define xen_set_default_idle 0
18613 #endif
18614
18615-void stop_this_cpu(void *dummy);
18616+void stop_this_cpu(void *dummy) __noreturn;
18617 void df_debug(struct pt_regs *regs, long error_code);
18618 #endif /* _ASM_X86_PROCESSOR_H */
18619diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18620index 86fc2bb..bd5049a 100644
18621--- a/arch/x86/include/asm/ptrace.h
18622+++ b/arch/x86/include/asm/ptrace.h
18623@@ -89,28 +89,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
18624 }
18625
18626 /*
18627- * user_mode_vm(regs) determines whether a register set came from user mode.
18628+ * user_mode(regs) determines whether a register set came from user mode.
18629 * This is true if V8086 mode was enabled OR if the register set was from
18630 * protected mode with RPL-3 CS value. This tricky test checks that with
18631 * one comparison. Many places in the kernel can bypass this full check
18632- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
18633+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
18634+ * be used.
18635 */
18636-static inline int user_mode(struct pt_regs *regs)
18637+static inline int user_mode_novm(struct pt_regs *regs)
18638 {
18639 #ifdef CONFIG_X86_32
18640 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
18641 #else
18642- return !!(regs->cs & 3);
18643+ return !!(regs->cs & SEGMENT_RPL_MASK);
18644 #endif
18645 }
18646
18647-static inline int user_mode_vm(struct pt_regs *regs)
18648+static inline int user_mode(struct pt_regs *regs)
18649 {
18650 #ifdef CONFIG_X86_32
18651 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
18652 USER_RPL;
18653 #else
18654- return user_mode(regs);
18655+ return user_mode_novm(regs);
18656 #endif
18657 }
18658
18659@@ -126,15 +127,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18660 #ifdef CONFIG_X86_64
18661 static inline bool user_64bit_mode(struct pt_regs *regs)
18662 {
18663+ unsigned long cs = regs->cs & 0xffff;
18664 #ifndef CONFIG_PARAVIRT
18665 /*
18666 * On non-paravirt systems, this is the only long mode CPL 3
18667 * selector. We do not allow long mode selectors in the LDT.
18668 */
18669- return regs->cs == __USER_CS;
18670+ return cs == __USER_CS;
18671 #else
18672 /* Headers are too twisted for this to go in paravirt.h. */
18673- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18674+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18675 #endif
18676 }
18677
18678@@ -185,9 +187,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18679 * Traps from the kernel do not save sp and ss.
18680 * Use the helper function to retrieve sp.
18681 */
18682- if (offset == offsetof(struct pt_regs, sp) &&
18683- regs->cs == __KERNEL_CS)
18684- return kernel_stack_pointer(regs);
18685+ if (offset == offsetof(struct pt_regs, sp)) {
18686+ unsigned long cs = regs->cs & 0xffff;
18687+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18688+ return kernel_stack_pointer(regs);
18689+ }
18690 #endif
18691 return *(unsigned long *)((unsigned long)regs + offset);
18692 }
18693diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
18694index ae0e241..e80b10b 100644
18695--- a/arch/x86/include/asm/qrwlock.h
18696+++ b/arch/x86/include/asm/qrwlock.h
18697@@ -7,8 +7,8 @@
18698 #define queue_write_unlock queue_write_unlock
18699 static inline void queue_write_unlock(struct qrwlock *lock)
18700 {
18701- barrier();
18702- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
18703+ barrier();
18704+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
18705 }
18706 #endif
18707
18708diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18709index 9c6b890..5305f53 100644
18710--- a/arch/x86/include/asm/realmode.h
18711+++ b/arch/x86/include/asm/realmode.h
18712@@ -22,16 +22,14 @@ struct real_mode_header {
18713 #endif
18714 /* APM/BIOS reboot */
18715 u32 machine_real_restart_asm;
18716-#ifdef CONFIG_X86_64
18717 u32 machine_real_restart_seg;
18718-#endif
18719 };
18720
18721 /* This must match data at trampoline_32/64.S */
18722 struct trampoline_header {
18723 #ifdef CONFIG_X86_32
18724 u32 start;
18725- u16 gdt_pad;
18726+ u16 boot_cs;
18727 u16 gdt_limit;
18728 u32 gdt_base;
18729 #else
18730diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18731index a82c4f1..ac45053 100644
18732--- a/arch/x86/include/asm/reboot.h
18733+++ b/arch/x86/include/asm/reboot.h
18734@@ -6,13 +6,13 @@
18735 struct pt_regs;
18736
18737 struct machine_ops {
18738- void (*restart)(char *cmd);
18739- void (*halt)(void);
18740- void (*power_off)(void);
18741+ void (* __noreturn restart)(char *cmd);
18742+ void (* __noreturn halt)(void);
18743+ void (* __noreturn power_off)(void);
18744 void (*shutdown)(void);
18745 void (*crash_shutdown)(struct pt_regs *);
18746- void (*emergency_restart)(void);
18747-};
18748+ void (* __noreturn emergency_restart)(void);
18749+} __no_const;
18750
18751 extern struct machine_ops machine_ops;
18752
18753diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
18754index 8f7866a..e442f20 100644
18755--- a/arch/x86/include/asm/rmwcc.h
18756+++ b/arch/x86/include/asm/rmwcc.h
18757@@ -3,7 +3,34 @@
18758
18759 #ifdef CC_HAVE_ASM_GOTO
18760
18761-#define __GEN_RMWcc(fullop, var, cc, ...) \
18762+#ifdef CONFIG_PAX_REFCOUNT
18763+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18764+do { \
18765+ asm_volatile_goto (fullop \
18766+ ";jno 0f\n" \
18767+ fullantiop \
18768+ ";int $4\n0:\n" \
18769+ _ASM_EXTABLE(0b, 0b) \
18770+ ";j" cc " %l[cc_label]" \
18771+ : : "m" (var), ## __VA_ARGS__ \
18772+ : "memory" : cc_label); \
18773+ return 0; \
18774+cc_label: \
18775+ return 1; \
18776+} while (0)
18777+#else
18778+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18779+do { \
18780+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
18781+ : : "m" (var), ## __VA_ARGS__ \
18782+ : "memory" : cc_label); \
18783+ return 0; \
18784+cc_label: \
18785+ return 1; \
18786+} while (0)
18787+#endif
18788+
18789+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18790 do { \
18791 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
18792 : : "m" (var), ## __VA_ARGS__ \
18793@@ -13,15 +40,46 @@ cc_label: \
18794 return 1; \
18795 } while (0)
18796
18797-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18798- __GEN_RMWcc(op " " arg0, var, cc)
18799+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18800+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18801
18802-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18803- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
18804+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18805+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18806+
18807+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18808+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
18809+
18810+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18811+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
18812
18813 #else /* !CC_HAVE_ASM_GOTO */
18814
18815-#define __GEN_RMWcc(fullop, var, cc, ...) \
18816+#ifdef CONFIG_PAX_REFCOUNT
18817+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18818+do { \
18819+ char c; \
18820+ asm volatile (fullop \
18821+ ";jno 0f\n" \
18822+ fullantiop \
18823+ ";int $4\n0:\n" \
18824+ _ASM_EXTABLE(0b, 0b) \
18825+ "; set" cc " %1" \
18826+ : "+m" (var), "=qm" (c) \
18827+ : __VA_ARGS__ : "memory"); \
18828+ return c != 0; \
18829+} while (0)
18830+#else
18831+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18832+do { \
18833+ char c; \
18834+ asm volatile (fullop "; set" cc " %1" \
18835+ : "+m" (var), "=qm" (c) \
18836+ : __VA_ARGS__ : "memory"); \
18837+ return c != 0; \
18838+} while (0)
18839+#endif
18840+
18841+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18842 do { \
18843 char c; \
18844 asm volatile (fullop "; set" cc " %1" \
18845@@ -30,11 +88,17 @@ do { \
18846 return c != 0; \
18847 } while (0)
18848
18849-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18850- __GEN_RMWcc(op " " arg0, var, cc)
18851+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18852+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18853+
18854+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18855+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18856+
18857+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18858+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
18859
18860-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18861- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
18862+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18863+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
18864
18865 #endif /* CC_HAVE_ASM_GOTO */
18866
18867diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
18868index cad82c9..2e5c5c1 100644
18869--- a/arch/x86/include/asm/rwsem.h
18870+++ b/arch/x86/include/asm/rwsem.h
18871@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
18872 {
18873 asm volatile("# beginning down_read\n\t"
18874 LOCK_PREFIX _ASM_INC "(%1)\n\t"
18875+
18876+#ifdef CONFIG_PAX_REFCOUNT
18877+ "jno 0f\n"
18878+ LOCK_PREFIX _ASM_DEC "(%1)\n"
18879+ "int $4\n0:\n"
18880+ _ASM_EXTABLE(0b, 0b)
18881+#endif
18882+
18883 /* adds 0x00000001 */
18884 " jns 1f\n"
18885 " call call_rwsem_down_read_failed\n"
18886@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
18887 "1:\n\t"
18888 " mov %1,%2\n\t"
18889 " add %3,%2\n\t"
18890+
18891+#ifdef CONFIG_PAX_REFCOUNT
18892+ "jno 0f\n"
18893+ "sub %3,%2\n"
18894+ "int $4\n0:\n"
18895+ _ASM_EXTABLE(0b, 0b)
18896+#endif
18897+
18898 " jle 2f\n\t"
18899 LOCK_PREFIX " cmpxchg %2,%0\n\t"
18900 " jnz 1b\n\t"
18901@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
18902 long tmp;
18903 asm volatile("# beginning down_write\n\t"
18904 LOCK_PREFIX " xadd %1,(%2)\n\t"
18905+
18906+#ifdef CONFIG_PAX_REFCOUNT
18907+ "jno 0f\n"
18908+ "mov %1,(%2)\n"
18909+ "int $4\n0:\n"
18910+ _ASM_EXTABLE(0b, 0b)
18911+#endif
18912+
18913 /* adds 0xffff0001, returns the old value */
18914 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
18915 /* was the active mask 0 before? */
18916@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
18917 long tmp;
18918 asm volatile("# beginning __up_read\n\t"
18919 LOCK_PREFIX " xadd %1,(%2)\n\t"
18920+
18921+#ifdef CONFIG_PAX_REFCOUNT
18922+ "jno 0f\n"
18923+ "mov %1,(%2)\n"
18924+ "int $4\n0:\n"
18925+ _ASM_EXTABLE(0b, 0b)
18926+#endif
18927+
18928 /* subtracts 1, returns the old value */
18929 " jns 1f\n\t"
18930 " call call_rwsem_wake\n" /* expects old value in %edx */
18931@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
18932 long tmp;
18933 asm volatile("# beginning __up_write\n\t"
18934 LOCK_PREFIX " xadd %1,(%2)\n\t"
18935+
18936+#ifdef CONFIG_PAX_REFCOUNT
18937+ "jno 0f\n"
18938+ "mov %1,(%2)\n"
18939+ "int $4\n0:\n"
18940+ _ASM_EXTABLE(0b, 0b)
18941+#endif
18942+
18943 /* subtracts 0xffff0001, returns the old value */
18944 " jns 1f\n\t"
18945 " call call_rwsem_wake\n" /* expects old value in %edx */
18946@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18947 {
18948 asm volatile("# beginning __downgrade_write\n\t"
18949 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
18950+
18951+#ifdef CONFIG_PAX_REFCOUNT
18952+ "jno 0f\n"
18953+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
18954+ "int $4\n0:\n"
18955+ _ASM_EXTABLE(0b, 0b)
18956+#endif
18957+
18958 /*
18959 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
18960 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
18961@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
18962 */
18963 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18964 {
18965- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
18966+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
18967+
18968+#ifdef CONFIG_PAX_REFCOUNT
18969+ "jno 0f\n"
18970+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
18971+ "int $4\n0:\n"
18972+ _ASM_EXTABLE(0b, 0b)
18973+#endif
18974+
18975 : "+m" (sem->count)
18976 : "er" (delta));
18977 }
18978@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
18979 */
18980 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
18981 {
18982- return delta + xadd(&sem->count, delta);
18983+ return delta + xadd_check_overflow(&sem->count, delta);
18984 }
18985
18986 #endif /* __KERNEL__ */
18987diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
18988index e657b7b..81fefb444 100644
18989--- a/arch/x86/include/asm/segment.h
18990+++ b/arch/x86/include/asm/segment.h
18991@@ -73,10 +73,15 @@
18992 * 26 - ESPFIX small SS
18993 * 27 - per-cpu [ offset to per-cpu data area ]
18994 * 28 - stack_canary-20 [ for stack protector ]
18995- * 29 - unused
18996- * 30 - unused
18997+ * 29 - PCI BIOS CS
18998+ * 30 - PCI BIOS DS
18999 * 31 - TSS for double fault handler
19000 */
19001+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
19002+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
19003+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
19004+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
19005+
19006 #define GDT_ENTRY_TLS_MIN 6
19007 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
19008
19009@@ -88,6 +93,8 @@
19010
19011 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
19012
19013+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
19014+
19015 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
19016
19017 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
19018@@ -113,6 +120,12 @@
19019 #define __KERNEL_STACK_CANARY 0
19020 #endif
19021
19022+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
19023+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
19024+
19025+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19026+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19027+
19028 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19029
19030 /*
19031@@ -140,7 +153,7 @@
19032 */
19033
19034 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19035-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19036+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19037
19038
19039 #else
19040@@ -164,6 +177,8 @@
19041 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19042 #define __USER32_DS __USER_DS
19043
19044+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19045+
19046 #define GDT_ENTRY_TSS 8 /* needs two entries */
19047 #define GDT_ENTRY_LDT 10 /* needs two entries */
19048 #define GDT_ENTRY_TLS_MIN 12
19049@@ -172,6 +187,8 @@
19050 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19051 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19052
19053+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19054+
19055 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19056 #define FS_TLS 0
19057 #define GS_TLS 1
19058@@ -179,12 +196,14 @@
19059 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19060 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19061
19062-#define GDT_ENTRIES 16
19063+#define GDT_ENTRIES 17
19064
19065 #endif
19066
19067 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19068+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19069 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19070+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19071 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19072 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19073 #ifndef CONFIG_PARAVIRT
19074@@ -267,7 +286,7 @@ static inline unsigned long get_limit(unsigned long segment)
19075 {
19076 unsigned long __limit;
19077 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19078- return __limit + 1;
19079+ return __limit;
19080 }
19081
19082 #endif /* !__ASSEMBLY__ */
19083diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19084index 8d3120f..352b440 100644
19085--- a/arch/x86/include/asm/smap.h
19086+++ b/arch/x86/include/asm/smap.h
19087@@ -25,11 +25,40 @@
19088
19089 #include <asm/alternative-asm.h>
19090
19091+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19092+#define ASM_PAX_OPEN_USERLAND \
19093+ 661: jmp 663f; \
19094+ .pushsection .altinstr_replacement, "a" ; \
19095+ 662: pushq %rax; nop; \
19096+ .popsection ; \
19097+ .pushsection .altinstructions, "a" ; \
19098+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19099+ .popsection ; \
19100+ call __pax_open_userland; \
19101+ popq %rax; \
19102+ 663:
19103+
19104+#define ASM_PAX_CLOSE_USERLAND \
19105+ 661: jmp 663f; \
19106+ .pushsection .altinstr_replacement, "a" ; \
19107+ 662: pushq %rax; nop; \
19108+ .popsection; \
19109+ .pushsection .altinstructions, "a" ; \
19110+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19111+ .popsection; \
19112+ call __pax_close_userland; \
19113+ popq %rax; \
19114+ 663:
19115+#else
19116+#define ASM_PAX_OPEN_USERLAND
19117+#define ASM_PAX_CLOSE_USERLAND
19118+#endif
19119+
19120 #ifdef CONFIG_X86_SMAP
19121
19122 #define ASM_CLAC \
19123 661: ASM_NOP3 ; \
19124- .pushsection .altinstr_replacement, "ax" ; \
19125+ .pushsection .altinstr_replacement, "a" ; \
19126 662: __ASM_CLAC ; \
19127 .popsection ; \
19128 .pushsection .altinstructions, "a" ; \
19129@@ -38,7 +67,7 @@
19130
19131 #define ASM_STAC \
19132 661: ASM_NOP3 ; \
19133- .pushsection .altinstr_replacement, "ax" ; \
19134+ .pushsection .altinstr_replacement, "a" ; \
19135 662: __ASM_STAC ; \
19136 .popsection ; \
19137 .pushsection .altinstructions, "a" ; \
19138@@ -56,6 +85,37 @@
19139
19140 #include <asm/alternative.h>
19141
19142+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19143+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19144+
19145+extern void __pax_open_userland(void);
19146+static __always_inline unsigned long pax_open_userland(void)
19147+{
19148+
19149+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19150+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19151+ :
19152+ : [open] "i" (__pax_open_userland)
19153+ : "memory", "rax");
19154+#endif
19155+
19156+ return 0;
19157+}
19158+
19159+extern void __pax_close_userland(void);
19160+static __always_inline unsigned long pax_close_userland(void)
19161+{
19162+
19163+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19164+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19165+ :
19166+ : [close] "i" (__pax_close_userland)
19167+ : "memory", "rax");
19168+#endif
19169+
19170+ return 0;
19171+}
19172+
19173 #ifdef CONFIG_X86_SMAP
19174
19175 static __always_inline void clac(void)
19176diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19177index 8cd1cc3..827e09e 100644
19178--- a/arch/x86/include/asm/smp.h
19179+++ b/arch/x86/include/asm/smp.h
19180@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19181 /* cpus sharing the last level cache: */
19182 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19183 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19184-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19185+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19186
19187 static inline struct cpumask *cpu_sibling_mask(int cpu)
19188 {
19189@@ -78,7 +78,7 @@ struct smp_ops {
19190
19191 void (*send_call_func_ipi)(const struct cpumask *mask);
19192 void (*send_call_func_single_ipi)(int cpu);
19193-};
19194+} __no_const;
19195
19196 /* Globals due to paravirt */
19197 extern void set_cpu_sibling_map(int cpu);
19198@@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
19199 extern int safe_smp_processor_id(void);
19200
19201 #elif defined(CONFIG_X86_64_SMP)
19202-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19203-
19204-#define stack_smp_processor_id() \
19205-({ \
19206- struct thread_info *ti; \
19207- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19208- ti->cpu; \
19209-})
19210+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19211+#define stack_smp_processor_id() raw_smp_processor_id()
19212 #define safe_smp_processor_id() smp_processor_id()
19213
19214 #endif
19215diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
19216index 6a99859..03cb807 100644
19217--- a/arch/x86/include/asm/stackprotector.h
19218+++ b/arch/x86/include/asm/stackprotector.h
19219@@ -47,7 +47,7 @@
19220 * head_32 for boot CPU and setup_per_cpu_areas() for others.
19221 */
19222 #define GDT_STACK_CANARY_INIT \
19223- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
19224+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
19225
19226 /*
19227 * Initialize the stackprotector canary value.
19228@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
19229
19230 static inline void load_stack_canary_segment(void)
19231 {
19232-#ifdef CONFIG_X86_32
19233+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19234 asm volatile ("mov %0, %%gs" : : "r" (0));
19235 #endif
19236 }
19237diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
19238index 70bbe39..4ae2bd4 100644
19239--- a/arch/x86/include/asm/stacktrace.h
19240+++ b/arch/x86/include/asm/stacktrace.h
19241@@ -11,28 +11,20 @@
19242
19243 extern int kstack_depth_to_print;
19244
19245-struct thread_info;
19246+struct task_struct;
19247 struct stacktrace_ops;
19248
19249-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
19250- unsigned long *stack,
19251- unsigned long bp,
19252- const struct stacktrace_ops *ops,
19253- void *data,
19254- unsigned long *end,
19255- int *graph);
19256+typedef unsigned long walk_stack_t(struct task_struct *task,
19257+ void *stack_start,
19258+ unsigned long *stack,
19259+ unsigned long bp,
19260+ const struct stacktrace_ops *ops,
19261+ void *data,
19262+ unsigned long *end,
19263+ int *graph);
19264
19265-extern unsigned long
19266-print_context_stack(struct thread_info *tinfo,
19267- unsigned long *stack, unsigned long bp,
19268- const struct stacktrace_ops *ops, void *data,
19269- unsigned long *end, int *graph);
19270-
19271-extern unsigned long
19272-print_context_stack_bp(struct thread_info *tinfo,
19273- unsigned long *stack, unsigned long bp,
19274- const struct stacktrace_ops *ops, void *data,
19275- unsigned long *end, int *graph);
19276+extern walk_stack_t print_context_stack;
19277+extern walk_stack_t print_context_stack_bp;
19278
19279 /* Generic stack tracer with callbacks */
19280
19281@@ -40,7 +32,7 @@ struct stacktrace_ops {
19282 void (*address)(void *data, unsigned long address, int reliable);
19283 /* On negative return stop dumping */
19284 int (*stack)(void *data, char *name);
19285- walk_stack_t walk_stack;
19286+ walk_stack_t *walk_stack;
19287 };
19288
19289 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
19290diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
19291index 751bf4b..a1278b5 100644
19292--- a/arch/x86/include/asm/switch_to.h
19293+++ b/arch/x86/include/asm/switch_to.h
19294@@ -112,7 +112,7 @@ do { \
19295 "call __switch_to\n\t" \
19296 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
19297 __switch_canary \
19298- "movq %P[thread_info](%%rsi),%%r8\n\t" \
19299+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
19300 "movq %%rax,%%rdi\n\t" \
19301 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
19302 "jnz ret_from_fork\n\t" \
19303@@ -123,7 +123,7 @@ do { \
19304 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
19305 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
19306 [_tif_fork] "i" (_TIF_FORK), \
19307- [thread_info] "i" (offsetof(struct task_struct, stack)), \
19308+ [thread_info] "m" (current_tinfo), \
19309 [current_task] "m" (current_task) \
19310 __switch_canary_iparam \
19311 : "memory", "cc" __EXTRA_CLOBBER)
19312diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
19313index 1d4e4f2..506db18 100644
19314--- a/arch/x86/include/asm/thread_info.h
19315+++ b/arch/x86/include/asm/thread_info.h
19316@@ -24,7 +24,6 @@ struct exec_domain;
19317 #include <linux/atomic.h>
19318
19319 struct thread_info {
19320- struct task_struct *task; /* main task structure */
19321 struct exec_domain *exec_domain; /* execution domain */
19322 __u32 flags; /* low level flags */
19323 __u32 status; /* thread synchronous flags */
19324@@ -32,13 +31,13 @@ struct thread_info {
19325 int saved_preempt_count;
19326 mm_segment_t addr_limit;
19327 void __user *sysenter_return;
19328+ unsigned long lowest_stack;
19329 unsigned int sig_on_uaccess_error:1;
19330 unsigned int uaccess_err:1; /* uaccess failed */
19331 };
19332
19333-#define INIT_THREAD_INFO(tsk) \
19334+#define INIT_THREAD_INFO \
19335 { \
19336- .task = &tsk, \
19337 .exec_domain = &default_exec_domain, \
19338 .flags = 0, \
19339 .cpu = 0, \
19340@@ -46,7 +45,7 @@ struct thread_info {
19341 .addr_limit = KERNEL_DS, \
19342 }
19343
19344-#define init_thread_info (init_thread_union.thread_info)
19345+#define init_thread_info (init_thread_union.stack)
19346 #define init_stack (init_thread_union.stack)
19347
19348 #else /* !__ASSEMBLY__ */
19349@@ -86,6 +85,7 @@ struct thread_info {
19350 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19351 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19352 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19353+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19354
19355 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19356 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19357@@ -109,17 +109,18 @@ struct thread_info {
19358 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19359 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19360 #define _TIF_X32 (1 << TIF_X32)
19361+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19362
19363 /* work to do in syscall_trace_enter() */
19364 #define _TIF_WORK_SYSCALL_ENTRY \
19365 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19366 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19367- _TIF_NOHZ)
19368+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19369
19370 /* work to do in syscall_trace_leave() */
19371 #define _TIF_WORK_SYSCALL_EXIT \
19372 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19373- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19374+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19375
19376 /* work to do on interrupt/exception return */
19377 #define _TIF_WORK_MASK \
19378@@ -130,7 +131,7 @@ struct thread_info {
19379 /* work to do on any return to user space */
19380 #define _TIF_ALLWORK_MASK \
19381 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19382- _TIF_NOHZ)
19383+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19384
19385 /* Only used for 64 bit */
19386 #define _TIF_DO_NOTIFY_MASK \
19387@@ -145,7 +146,6 @@ struct thread_info {
19388 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
19389
19390 #define STACK_WARN (THREAD_SIZE/8)
19391-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
19392
19393 /*
19394 * macros/functions for gaining access to the thread information structure
19395@@ -156,12 +156,11 @@ struct thread_info {
19396
19397 DECLARE_PER_CPU(unsigned long, kernel_stack);
19398
19399+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19400+
19401 static inline struct thread_info *current_thread_info(void)
19402 {
19403- struct thread_info *ti;
19404- ti = (void *)(this_cpu_read_stable(kernel_stack) +
19405- KERNEL_STACK_OFFSET - THREAD_SIZE);
19406- return ti;
19407+ return this_cpu_read_stable(current_tinfo);
19408 }
19409
19410 static inline unsigned long current_stack_pointer(void)
19411@@ -179,14 +178,7 @@ static inline unsigned long current_stack_pointer(void)
19412
19413 /* how to get the thread information struct from ASM */
19414 #define GET_THREAD_INFO(reg) \
19415- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
19416- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
19417-
19418-/*
19419- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
19420- * a certain register (to be used in assembler memory operands).
19421- */
19422-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
19423+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
19424
19425 #endif
19426
19427@@ -242,5 +234,12 @@ static inline bool is_ia32_task(void)
19428 extern void arch_task_cache_init(void);
19429 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19430 extern void arch_release_task_struct(struct task_struct *tsk);
19431+
19432+#define __HAVE_THREAD_FUNCTIONS
19433+#define task_thread_info(task) (&(task)->tinfo)
19434+#define task_stack_page(task) ((task)->stack)
19435+#define setup_thread_stack(p, org) do {} while (0)
19436+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19437+
19438 #endif
19439 #endif /* _ASM_X86_THREAD_INFO_H */
19440diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19441index cd79194..e7a9491 100644
19442--- a/arch/x86/include/asm/tlbflush.h
19443+++ b/arch/x86/include/asm/tlbflush.h
19444@@ -86,18 +86,44 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
19445
19446 static inline void __native_flush_tlb(void)
19447 {
19448+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19449+ u64 descriptor[2];
19450+
19451+ descriptor[0] = PCID_KERNEL;
19452+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
19453+ return;
19454+ }
19455+
19456+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19457+ if (static_cpu_has(X86_FEATURE_PCID)) {
19458+ unsigned int cpu = raw_get_cpu();
19459+
19460+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19461+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19462+ raw_put_cpu_no_resched();
19463+ return;
19464+ }
19465+#endif
19466+
19467 native_write_cr3(native_read_cr3());
19468 }
19469
19470 static inline void __native_flush_tlb_global_irq_disabled(void)
19471 {
19472- unsigned long cr4;
19473+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19474+ u64 descriptor[2];
19475
19476- cr4 = this_cpu_read(cpu_tlbstate.cr4);
19477- /* clear PGE */
19478- native_write_cr4(cr4 & ~X86_CR4_PGE);
19479- /* write old PGE again and flush TLBs */
19480- native_write_cr4(cr4);
19481+ descriptor[0] = PCID_KERNEL;
19482+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19483+ } else {
19484+ unsigned long cr4;
19485+
19486+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
19487+ /* clear PGE */
19488+ native_write_cr4(cr4 & ~X86_CR4_PGE);
19489+ /* write old PGE again and flush TLBs */
19490+ native_write_cr4(cr4);
19491+ }
19492 }
19493
19494 static inline void __native_flush_tlb_global(void)
19495@@ -118,6 +144,41 @@ static inline void __native_flush_tlb_global(void)
19496
19497 static inline void __native_flush_tlb_single(unsigned long addr)
19498 {
19499+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19500+ u64 descriptor[2];
19501+
19502+ descriptor[0] = PCID_KERNEL;
19503+ descriptor[1] = addr;
19504+
19505+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19506+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19507+ if (addr < TASK_SIZE_MAX)
19508+ descriptor[1] += pax_user_shadow_base;
19509+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19510+ }
19511+
19512+ descriptor[0] = PCID_USER;
19513+ descriptor[1] = addr;
19514+#endif
19515+
19516+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19517+ return;
19518+ }
19519+
19520+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19521+ if (static_cpu_has(X86_FEATURE_PCID)) {
19522+ unsigned int cpu = raw_get_cpu();
19523+
19524+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19525+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19526+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19527+ raw_put_cpu_no_resched();
19528+
19529+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19530+ addr += pax_user_shadow_base;
19531+ }
19532+#endif
19533+
19534 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19535 }
19536
19537diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19538index ace9dec..3f9e253 100644
19539--- a/arch/x86/include/asm/uaccess.h
19540+++ b/arch/x86/include/asm/uaccess.h
19541@@ -7,6 +7,7 @@
19542 #include <linux/compiler.h>
19543 #include <linux/thread_info.h>
19544 #include <linux/string.h>
19545+#include <linux/spinlock.h>
19546 #include <asm/asm.h>
19547 #include <asm/page.h>
19548 #include <asm/smap.h>
19549@@ -29,7 +30,12 @@
19550
19551 #define get_ds() (KERNEL_DS)
19552 #define get_fs() (current_thread_info()->addr_limit)
19553+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19554+void __set_fs(mm_segment_t x);
19555+void set_fs(mm_segment_t x);
19556+#else
19557 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19558+#endif
19559
19560 #define segment_eq(a, b) ((a).seg == (b).seg)
19561
19562@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
19563 * checks that the pointer is in the user space range - after calling
19564 * this function, memory access functions may still return -EFAULT.
19565 */
19566-#define access_ok(type, addr, size) \
19567- likely(!__range_not_ok(addr, size, user_addr_max()))
19568+extern int _cond_resched(void);
19569+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
19570+#define access_ok(type, addr, size) \
19571+({ \
19572+ unsigned long __size = size; \
19573+ unsigned long __addr = (unsigned long)addr; \
19574+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19575+ if (__ret_ao && __size) { \
19576+ unsigned long __addr_ao = __addr & PAGE_MASK; \
19577+ unsigned long __end_ao = __addr + __size - 1; \
19578+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19579+ while (__addr_ao <= __end_ao) { \
19580+ char __c_ao; \
19581+ __addr_ao += PAGE_SIZE; \
19582+ if (__size > PAGE_SIZE) \
19583+ _cond_resched(); \
19584+ if (__get_user(__c_ao, (char __user *)__addr)) \
19585+ break; \
19586+ if (type != VERIFY_WRITE) { \
19587+ __addr = __addr_ao; \
19588+ continue; \
19589+ } \
19590+ if (__put_user(__c_ao, (char __user *)__addr)) \
19591+ break; \
19592+ __addr = __addr_ao; \
19593+ } \
19594+ } \
19595+ } \
19596+ __ret_ao; \
19597+})
19598
19599 /*
19600 * The exception table consists of pairs of addresses relative to the
19601@@ -134,11 +168,13 @@ extern int __get_user_8(void);
19602 extern int __get_user_bad(void);
19603
19604 /*
19605- * This is a type: either unsigned long, if the argument fits into
19606- * that type, or otherwise unsigned long long.
19607+ * This is a type: either (un)signed int, if the argument fits into
19608+ * that type, or otherwise (un)signed long long.
19609 */
19610 #define __inttype(x) \
19611-__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19612+__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0U), \
19613+ __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0ULL, 0LL),\
19614+ __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0U, 0)))
19615
19616 /**
19617 * get_user: - Get a simple variable from user space.
19618@@ -176,10 +212,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19619 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19620 __chk_user_ptr(ptr); \
19621 might_fault(); \
19622+ pax_open_userland(); \
19623 asm volatile("call __get_user_%P3" \
19624 : "=a" (__ret_gu), "=r" (__val_gu) \
19625 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19626 (x) = (__force __typeof__(*(ptr))) __val_gu; \
19627+ pax_close_userland(); \
19628 __ret_gu; \
19629 })
19630
19631@@ -187,13 +225,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19632 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19633 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19634
19635-
19636+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19637+#define __copyuser_seg "gs;"
19638+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19639+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19640+#else
19641+#define __copyuser_seg
19642+#define __COPYUSER_SET_ES
19643+#define __COPYUSER_RESTORE_ES
19644+#endif
19645
19646 #ifdef CONFIG_X86_32
19647 #define __put_user_asm_u64(x, addr, err, errret) \
19648 asm volatile(ASM_STAC "\n" \
19649- "1: movl %%eax,0(%2)\n" \
19650- "2: movl %%edx,4(%2)\n" \
19651+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19652+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19653 "3: " ASM_CLAC "\n" \
19654 ".section .fixup,\"ax\"\n" \
19655 "4: movl %3,%0\n" \
19656@@ -206,8 +252,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19657
19658 #define __put_user_asm_ex_u64(x, addr) \
19659 asm volatile(ASM_STAC "\n" \
19660- "1: movl %%eax,0(%1)\n" \
19661- "2: movl %%edx,4(%1)\n" \
19662+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19663+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19664 "3: " ASM_CLAC "\n" \
19665 _ASM_EXTABLE_EX(1b, 2b) \
19666 _ASM_EXTABLE_EX(2b, 3b) \
19667@@ -257,7 +303,8 @@ extern void __put_user_8(void);
19668 __typeof__(*(ptr)) __pu_val; \
19669 __chk_user_ptr(ptr); \
19670 might_fault(); \
19671- __pu_val = x; \
19672+ __pu_val = (x); \
19673+ pax_open_userland(); \
19674 switch (sizeof(*(ptr))) { \
19675 case 1: \
19676 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19677@@ -275,6 +322,7 @@ extern void __put_user_8(void);
19678 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19679 break; \
19680 } \
19681+ pax_close_userland(); \
19682 __ret_pu; \
19683 })
19684
19685@@ -355,8 +403,10 @@ do { \
19686 } while (0)
19687
19688 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19689+do { \
19690+ pax_open_userland(); \
19691 asm volatile(ASM_STAC "\n" \
19692- "1: mov"itype" %2,%"rtype"1\n" \
19693+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19694 "2: " ASM_CLAC "\n" \
19695 ".section .fixup,\"ax\"\n" \
19696 "3: mov %3,%0\n" \
19697@@ -364,8 +414,10 @@ do { \
19698 " jmp 2b\n" \
19699 ".previous\n" \
19700 _ASM_EXTABLE(1b, 3b) \
19701- : "=r" (err), ltype(x) \
19702- : "m" (__m(addr)), "i" (errret), "0" (err))
19703+ : "=r" (err), ltype (x) \
19704+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
19705+ pax_close_userland(); \
19706+} while (0)
19707
19708 #define __get_user_size_ex(x, ptr, size) \
19709 do { \
19710@@ -389,7 +441,7 @@ do { \
19711 } while (0)
19712
19713 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
19714- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
19715+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
19716 "2:\n" \
19717 _ASM_EXTABLE_EX(1b, 2b) \
19718 : ltype(x) : "m" (__m(addr)))
19719@@ -406,13 +458,24 @@ do { \
19720 int __gu_err; \
19721 unsigned long __gu_val; \
19722 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
19723- (x) = (__force __typeof__(*(ptr)))__gu_val; \
19724+ (x) = (__typeof__(*(ptr)))__gu_val; \
19725 __gu_err; \
19726 })
19727
19728 /* FIXME: this hack is definitely wrong -AK */
19729 struct __large_struct { unsigned long buf[100]; };
19730-#define __m(x) (*(struct __large_struct __user *)(x))
19731+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19732+#define ____m(x) \
19733+({ \
19734+ unsigned long ____x = (unsigned long)(x); \
19735+ if (____x < pax_user_shadow_base) \
19736+ ____x += pax_user_shadow_base; \
19737+ (typeof(x))____x; \
19738+})
19739+#else
19740+#define ____m(x) (x)
19741+#endif
19742+#define __m(x) (*(struct __large_struct __user *)____m(x))
19743
19744 /*
19745 * Tell gcc we read from memory instead of writing: this is because
19746@@ -420,8 +483,10 @@ struct __large_struct { unsigned long buf[100]; };
19747 * aliasing issues.
19748 */
19749 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19750+do { \
19751+ pax_open_userland(); \
19752 asm volatile(ASM_STAC "\n" \
19753- "1: mov"itype" %"rtype"1,%2\n" \
19754+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
19755 "2: " ASM_CLAC "\n" \
19756 ".section .fixup,\"ax\"\n" \
19757 "3: mov %3,%0\n" \
19758@@ -429,10 +494,12 @@ struct __large_struct { unsigned long buf[100]; };
19759 ".previous\n" \
19760 _ASM_EXTABLE(1b, 3b) \
19761 : "=r"(err) \
19762- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
19763+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
19764+ pax_close_userland(); \
19765+} while (0)
19766
19767 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
19768- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
19769+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
19770 "2:\n" \
19771 _ASM_EXTABLE_EX(1b, 2b) \
19772 : : ltype(x), "m" (__m(addr)))
19773@@ -442,11 +509,13 @@ struct __large_struct { unsigned long buf[100]; };
19774 */
19775 #define uaccess_try do { \
19776 current_thread_info()->uaccess_err = 0; \
19777+ pax_open_userland(); \
19778 stac(); \
19779 barrier();
19780
19781 #define uaccess_catch(err) \
19782 clac(); \
19783+ pax_close_userland(); \
19784 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
19785 } while (0)
19786
19787@@ -471,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
19788 * On error, the variable @x is set to zero.
19789 */
19790
19791+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19792+#define __get_user(x, ptr) get_user((x), (ptr))
19793+#else
19794 #define __get_user(x, ptr) \
19795 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
19796+#endif
19797
19798 /**
19799 * __put_user: - Write a simple value into user space, with less checking.
19800@@ -494,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
19801 * Returns zero on success, or -EFAULT on error.
19802 */
19803
19804+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19805+#define __put_user(x, ptr) put_user((x), (ptr))
19806+#else
19807 #define __put_user(x, ptr) \
19808 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
19809+#endif
19810
19811 #define __get_user_unaligned __get_user
19812 #define __put_user_unaligned __put_user
19813@@ -513,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
19814 #define get_user_ex(x, ptr) do { \
19815 unsigned long __gue_val; \
19816 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
19817- (x) = (__force __typeof__(*(ptr)))__gue_val; \
19818+ (x) = (__typeof__(*(ptr)))__gue_val; \
19819 } while (0)
19820
19821 #define put_user_try uaccess_try
19822@@ -531,7 +608,7 @@ extern __must_check long strlen_user(const char __user *str);
19823 extern __must_check long strnlen_user(const char __user *str, long n);
19824
19825 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
19826-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
19827+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
19828
19829 extern void __cmpxchg_wrong_size(void)
19830 __compiletime_error("Bad argument size for cmpxchg");
19831@@ -542,18 +619,19 @@ extern void __cmpxchg_wrong_size(void)
19832 __typeof__(ptr) __uval = (uval); \
19833 __typeof__(*(ptr)) __old = (old); \
19834 __typeof__(*(ptr)) __new = (new); \
19835+ pax_open_userland(); \
19836 switch (size) { \
19837 case 1: \
19838 { \
19839 asm volatile("\t" ASM_STAC "\n" \
19840- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
19841+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
19842 "2:\t" ASM_CLAC "\n" \
19843 "\t.section .fixup, \"ax\"\n" \
19844 "3:\tmov %3, %0\n" \
19845 "\tjmp 2b\n" \
19846 "\t.previous\n" \
19847 _ASM_EXTABLE(1b, 3b) \
19848- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19849+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19850 : "i" (-EFAULT), "q" (__new), "1" (__old) \
19851 : "memory" \
19852 ); \
19853@@ -562,14 +640,14 @@ extern void __cmpxchg_wrong_size(void)
19854 case 2: \
19855 { \
19856 asm volatile("\t" ASM_STAC "\n" \
19857- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
19858+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
19859 "2:\t" ASM_CLAC "\n" \
19860 "\t.section .fixup, \"ax\"\n" \
19861 "3:\tmov %3, %0\n" \
19862 "\tjmp 2b\n" \
19863 "\t.previous\n" \
19864 _ASM_EXTABLE(1b, 3b) \
19865- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19866+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19867 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19868 : "memory" \
19869 ); \
19870@@ -578,14 +656,14 @@ extern void __cmpxchg_wrong_size(void)
19871 case 4: \
19872 { \
19873 asm volatile("\t" ASM_STAC "\n" \
19874- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
19875+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
19876 "2:\t" ASM_CLAC "\n" \
19877 "\t.section .fixup, \"ax\"\n" \
19878 "3:\tmov %3, %0\n" \
19879 "\tjmp 2b\n" \
19880 "\t.previous\n" \
19881 _ASM_EXTABLE(1b, 3b) \
19882- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19883+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19884 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19885 : "memory" \
19886 ); \
19887@@ -597,14 +675,14 @@ extern void __cmpxchg_wrong_size(void)
19888 __cmpxchg_wrong_size(); \
19889 \
19890 asm volatile("\t" ASM_STAC "\n" \
19891- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
19892+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
19893 "2:\t" ASM_CLAC "\n" \
19894 "\t.section .fixup, \"ax\"\n" \
19895 "3:\tmov %3, %0\n" \
19896 "\tjmp 2b\n" \
19897 "\t.previous\n" \
19898 _ASM_EXTABLE(1b, 3b) \
19899- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19900+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19901 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19902 : "memory" \
19903 ); \
19904@@ -613,6 +691,7 @@ extern void __cmpxchg_wrong_size(void)
19905 default: \
19906 __cmpxchg_wrong_size(); \
19907 } \
19908+ pax_close_userland(); \
19909 *__uval = __old; \
19910 __ret; \
19911 })
19912@@ -636,17 +715,6 @@ extern struct movsl_mask {
19913
19914 #define ARCH_HAS_NOCACHE_UACCESS 1
19915
19916-#ifdef CONFIG_X86_32
19917-# include <asm/uaccess_32.h>
19918-#else
19919-# include <asm/uaccess_64.h>
19920-#endif
19921-
19922-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
19923- unsigned n);
19924-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19925- unsigned n);
19926-
19927 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
19928 # define copy_user_diag __compiletime_error
19929 #else
19930@@ -656,7 +724,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
19931 extern void copy_user_diag("copy_from_user() buffer size is too small")
19932 copy_from_user_overflow(void);
19933 extern void copy_user_diag("copy_to_user() buffer size is too small")
19934-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19935+copy_to_user_overflow(void);
19936
19937 #undef copy_user_diag
19938
19939@@ -669,7 +737,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
19940
19941 extern void
19942 __compiletime_warning("copy_to_user() buffer size is not provably correct")
19943-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
19944+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
19945 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
19946
19947 #else
19948@@ -684,10 +752,16 @@ __copy_from_user_overflow(int size, unsigned long count)
19949
19950 #endif
19951
19952+#ifdef CONFIG_X86_32
19953+# include <asm/uaccess_32.h>
19954+#else
19955+# include <asm/uaccess_64.h>
19956+#endif
19957+
19958 static inline unsigned long __must_check
19959 copy_from_user(void *to, const void __user *from, unsigned long n)
19960 {
19961- int sz = __compiletime_object_size(to);
19962+ size_t sz = __compiletime_object_size(to);
19963
19964 might_fault();
19965
19966@@ -709,12 +783,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19967 * case, and do only runtime checking for non-constant sizes.
19968 */
19969
19970- if (likely(sz < 0 || sz >= n))
19971- n = _copy_from_user(to, from, n);
19972- else if(__builtin_constant_p(n))
19973- copy_from_user_overflow();
19974- else
19975- __copy_from_user_overflow(sz, n);
19976+ if (likely(sz != (size_t)-1 && sz < n)) {
19977+ if(__builtin_constant_p(n))
19978+ copy_from_user_overflow();
19979+ else
19980+ __copy_from_user_overflow(sz, n);
19981+ } else if (access_ok(VERIFY_READ, from, n))
19982+ n = __copy_from_user(to, from, n);
19983+ else if ((long)n > 0)
19984+ memset(to, 0, n);
19985
19986 return n;
19987 }
19988@@ -722,17 +799,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
19989 static inline unsigned long __must_check
19990 copy_to_user(void __user *to, const void *from, unsigned long n)
19991 {
19992- int sz = __compiletime_object_size(from);
19993+ size_t sz = __compiletime_object_size(from);
19994
19995 might_fault();
19996
19997 /* See the comment in copy_from_user() above. */
19998- if (likely(sz < 0 || sz >= n))
19999- n = _copy_to_user(to, from, n);
20000- else if(__builtin_constant_p(n))
20001- copy_to_user_overflow();
20002- else
20003- __copy_to_user_overflow(sz, n);
20004+ if (likely(sz != (size_t)-1 && sz < n)) {
20005+ if(__builtin_constant_p(n))
20006+ copy_to_user_overflow();
20007+ else
20008+ __copy_to_user_overflow(sz, n);
20009+ } else if (access_ok(VERIFY_WRITE, to, n))
20010+ n = __copy_to_user(to, from, n);
20011
20012 return n;
20013 }
20014diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
20015index 3c03a5d..edb68ae 100644
20016--- a/arch/x86/include/asm/uaccess_32.h
20017+++ b/arch/x86/include/asm/uaccess_32.h
20018@@ -40,9 +40,14 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
20019 * anything, so this is accurate.
20020 */
20021
20022-static __always_inline unsigned long __must_check
20023+static __always_inline __size_overflow(3) unsigned long __must_check
20024 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20025 {
20026+ if ((long)n < 0)
20027+ return n;
20028+
20029+ check_object_size(from, n, true);
20030+
20031 if (__builtin_constant_p(n)) {
20032 unsigned long ret;
20033
20034@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20035 __copy_to_user(void __user *to, const void *from, unsigned long n)
20036 {
20037 might_fault();
20038+
20039 return __copy_to_user_inatomic(to, from, n);
20040 }
20041
20042-static __always_inline unsigned long
20043+static __always_inline __size_overflow(3) unsigned long
20044 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20045 {
20046+ if ((long)n < 0)
20047+ return n;
20048+
20049 /* Avoid zeroing the tail if the copy fails..
20050 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20051 * but as the zeroing behaviour is only significant when n is not
20052@@ -137,6 +146,12 @@ static __always_inline unsigned long
20053 __copy_from_user(void *to, const void __user *from, unsigned long n)
20054 {
20055 might_fault();
20056+
20057+ if ((long)n < 0)
20058+ return n;
20059+
20060+ check_object_size(to, n, false);
20061+
20062 if (__builtin_constant_p(n)) {
20063 unsigned long ret;
20064
20065@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20066 const void __user *from, unsigned long n)
20067 {
20068 might_fault();
20069+
20070+ if ((long)n < 0)
20071+ return n;
20072+
20073 if (__builtin_constant_p(n)) {
20074 unsigned long ret;
20075
20076@@ -181,7 +200,10 @@ static __always_inline unsigned long
20077 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20078 unsigned long n)
20079 {
20080- return __copy_from_user_ll_nocache_nozero(to, from, n);
20081+ if ((long)n < 0)
20082+ return n;
20083+
20084+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20085 }
20086
20087 #endif /* _ASM_X86_UACCESS_32_H */
20088diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20089index f2f9b39..2ae1bf8 100644
20090--- a/arch/x86/include/asm/uaccess_64.h
20091+++ b/arch/x86/include/asm/uaccess_64.h
20092@@ -10,6 +10,9 @@
20093 #include <asm/alternative.h>
20094 #include <asm/cpufeature.h>
20095 #include <asm/page.h>
20096+#include <asm/pgtable.h>
20097+
20098+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20099
20100 /*
20101 * Copy To/From Userspace
20102@@ -23,8 +26,8 @@ copy_user_generic_string(void *to, const void *from, unsigned len);
20103 __must_check unsigned long
20104 copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20105
20106-static __always_inline __must_check unsigned long
20107-copy_user_generic(void *to, const void *from, unsigned len)
20108+static __always_inline __must_check __size_overflow(3) unsigned long
20109+copy_user_generic(void *to, const void *from, unsigned long len)
20110 {
20111 unsigned ret;
20112
20113@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20114 }
20115
20116 __must_check unsigned long
20117-copy_in_user(void __user *to, const void __user *from, unsigned len);
20118+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20119
20120 static __always_inline __must_check
20121-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20122+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20123 {
20124- int ret = 0;
20125+ size_t sz = __compiletime_object_size(dst);
20126+ unsigned ret = 0;
20127+
20128+ if (size > INT_MAX)
20129+ return size;
20130+
20131+ check_object_size(dst, size, false);
20132+
20133+#ifdef CONFIG_PAX_MEMORY_UDEREF
20134+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20135+ return size;
20136+#endif
20137+
20138+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20139+ if(__builtin_constant_p(size))
20140+ copy_from_user_overflow();
20141+ else
20142+ __copy_from_user_overflow(sz, size);
20143+ return size;
20144+ }
20145
20146 if (!__builtin_constant_p(size))
20147- return copy_user_generic(dst, (__force void *)src, size);
20148+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20149 switch (size) {
20150- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20151+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20152 ret, "b", "b", "=q", 1);
20153 return ret;
20154- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20155+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20156 ret, "w", "w", "=r", 2);
20157 return ret;
20158- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20159+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20160 ret, "l", "k", "=r", 4);
20161 return ret;
20162- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20163+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20164 ret, "q", "", "=r", 8);
20165 return ret;
20166 case 10:
20167- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20168+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20169 ret, "q", "", "=r", 10);
20170 if (unlikely(ret))
20171 return ret;
20172 __get_user_asm(*(u16 *)(8 + (char *)dst),
20173- (u16 __user *)(8 + (char __user *)src),
20174+ (const u16 __user *)(8 + (const char __user *)src),
20175 ret, "w", "w", "=r", 2);
20176 return ret;
20177 case 16:
20178- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20179+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20180 ret, "q", "", "=r", 16);
20181 if (unlikely(ret))
20182 return ret;
20183 __get_user_asm(*(u64 *)(8 + (char *)dst),
20184- (u64 __user *)(8 + (char __user *)src),
20185+ (const u64 __user *)(8 + (const char __user *)src),
20186 ret, "q", "", "=r", 8);
20187 return ret;
20188 default:
20189- return copy_user_generic(dst, (__force void *)src, size);
20190+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20191 }
20192 }
20193
20194 static __always_inline __must_check
20195-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20196+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20197 {
20198 might_fault();
20199 return __copy_from_user_nocheck(dst, src, size);
20200 }
20201
20202 static __always_inline __must_check
20203-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20204+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20205 {
20206- int ret = 0;
20207+ size_t sz = __compiletime_object_size(src);
20208+ unsigned ret = 0;
20209+
20210+ if (size > INT_MAX)
20211+ return size;
20212+
20213+ check_object_size(src, size, true);
20214+
20215+#ifdef CONFIG_PAX_MEMORY_UDEREF
20216+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20217+ return size;
20218+#endif
20219+
20220+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20221+ if(__builtin_constant_p(size))
20222+ copy_to_user_overflow();
20223+ else
20224+ __copy_to_user_overflow(sz, size);
20225+ return size;
20226+ }
20227
20228 if (!__builtin_constant_p(size))
20229- return copy_user_generic((__force void *)dst, src, size);
20230+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20231 switch (size) {
20232- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
20233+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
20234 ret, "b", "b", "iq", 1);
20235 return ret;
20236- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
20237+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
20238 ret, "w", "w", "ir", 2);
20239 return ret;
20240- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
20241+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
20242 ret, "l", "k", "ir", 4);
20243 return ret;
20244- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
20245+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20246 ret, "q", "", "er", 8);
20247 return ret;
20248 case 10:
20249- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20250+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20251 ret, "q", "", "er", 10);
20252 if (unlikely(ret))
20253 return ret;
20254 asm("":::"memory");
20255- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
20256+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
20257 ret, "w", "w", "ir", 2);
20258 return ret;
20259 case 16:
20260- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20261+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20262 ret, "q", "", "er", 16);
20263 if (unlikely(ret))
20264 return ret;
20265 asm("":::"memory");
20266- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
20267+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
20268 ret, "q", "", "er", 8);
20269 return ret;
20270 default:
20271- return copy_user_generic((__force void *)dst, src, size);
20272+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20273 }
20274 }
20275
20276 static __always_inline __must_check
20277-int __copy_to_user(void __user *dst, const void *src, unsigned size)
20278+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
20279 {
20280 might_fault();
20281 return __copy_to_user_nocheck(dst, src, size);
20282 }
20283
20284 static __always_inline __must_check
20285-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20286+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20287 {
20288- int ret = 0;
20289+ unsigned ret = 0;
20290
20291 might_fault();
20292+
20293+ if (size > INT_MAX)
20294+ return size;
20295+
20296+#ifdef CONFIG_PAX_MEMORY_UDEREF
20297+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20298+ return size;
20299+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20300+ return size;
20301+#endif
20302+
20303 if (!__builtin_constant_p(size))
20304- return copy_user_generic((__force void *)dst,
20305- (__force void *)src, size);
20306+ return copy_user_generic((__force_kernel void *)____m(dst),
20307+ (__force_kernel const void *)____m(src), size);
20308 switch (size) {
20309 case 1: {
20310 u8 tmp;
20311- __get_user_asm(tmp, (u8 __user *)src,
20312+ __get_user_asm(tmp, (const u8 __user *)src,
20313 ret, "b", "b", "=q", 1);
20314 if (likely(!ret))
20315 __put_user_asm(tmp, (u8 __user *)dst,
20316@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20317 }
20318 case 2: {
20319 u16 tmp;
20320- __get_user_asm(tmp, (u16 __user *)src,
20321+ __get_user_asm(tmp, (const u16 __user *)src,
20322 ret, "w", "w", "=r", 2);
20323 if (likely(!ret))
20324 __put_user_asm(tmp, (u16 __user *)dst,
20325@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20326
20327 case 4: {
20328 u32 tmp;
20329- __get_user_asm(tmp, (u32 __user *)src,
20330+ __get_user_asm(tmp, (const u32 __user *)src,
20331 ret, "l", "k", "=r", 4);
20332 if (likely(!ret))
20333 __put_user_asm(tmp, (u32 __user *)dst,
20334@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20335 }
20336 case 8: {
20337 u64 tmp;
20338- __get_user_asm(tmp, (u64 __user *)src,
20339+ __get_user_asm(tmp, (const u64 __user *)src,
20340 ret, "q", "", "=r", 8);
20341 if (likely(!ret))
20342 __put_user_asm(tmp, (u64 __user *)dst,
20343@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20344 return ret;
20345 }
20346 default:
20347- return copy_user_generic((__force void *)dst,
20348- (__force void *)src, size);
20349+ return copy_user_generic((__force_kernel void *)____m(dst),
20350+ (__force_kernel const void *)____m(src), size);
20351 }
20352 }
20353
20354-static __must_check __always_inline int
20355-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20356+static __must_check __always_inline unsigned long
20357+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20358 {
20359 return __copy_from_user_nocheck(dst, src, size);
20360 }
20361
20362-static __must_check __always_inline int
20363-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20364+static __must_check __always_inline unsigned long
20365+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20366 {
20367 return __copy_to_user_nocheck(dst, src, size);
20368 }
20369
20370-extern long __copy_user_nocache(void *dst, const void __user *src,
20371- unsigned size, int zerorest);
20372+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20373+ unsigned long size, int zerorest);
20374
20375-static inline int
20376-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20377+static inline unsigned long
20378+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20379 {
20380 might_fault();
20381+
20382+ if (size > INT_MAX)
20383+ return size;
20384+
20385+#ifdef CONFIG_PAX_MEMORY_UDEREF
20386+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20387+ return size;
20388+#endif
20389+
20390 return __copy_user_nocache(dst, src, size, 1);
20391 }
20392
20393-static inline int
20394+static inline unsigned long
20395 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20396- unsigned size)
20397+ unsigned long size)
20398 {
20399+ if (size > INT_MAX)
20400+ return size;
20401+
20402+#ifdef CONFIG_PAX_MEMORY_UDEREF
20403+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20404+ return size;
20405+#endif
20406+
20407 return __copy_user_nocache(dst, src, size, 0);
20408 }
20409
20410 unsigned long
20411-copy_user_handle_tail(char *to, char *from, unsigned len);
20412+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len) __size_overflow(3);
20413
20414 #endif /* _ASM_X86_UACCESS_64_H */
20415diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20416index 5b238981..77fdd78 100644
20417--- a/arch/x86/include/asm/word-at-a-time.h
20418+++ b/arch/x86/include/asm/word-at-a-time.h
20419@@ -11,7 +11,7 @@
20420 * and shift, for example.
20421 */
20422 struct word_at_a_time {
20423- const unsigned long one_bits, high_bits;
20424+ unsigned long one_bits, high_bits;
20425 };
20426
20427 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20428diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20429index f58a9c7..dc378042a 100644
20430--- a/arch/x86/include/asm/x86_init.h
20431+++ b/arch/x86/include/asm/x86_init.h
20432@@ -129,7 +129,7 @@ struct x86_init_ops {
20433 struct x86_init_timers timers;
20434 struct x86_init_iommu iommu;
20435 struct x86_init_pci pci;
20436-};
20437+} __no_const;
20438
20439 /**
20440 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20441@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20442 void (*setup_percpu_clockev)(void);
20443 void (*early_percpu_clock_init)(void);
20444 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20445-};
20446+} __no_const;
20447
20448 struct timespec;
20449
20450@@ -168,7 +168,7 @@ struct x86_platform_ops {
20451 void (*save_sched_clock_state)(void);
20452 void (*restore_sched_clock_state)(void);
20453 void (*apic_post_init)(void);
20454-};
20455+} __no_const;
20456
20457 struct pci_dev;
20458 struct msi_msg;
20459@@ -182,7 +182,7 @@ struct x86_msi_ops {
20460 void (*teardown_msi_irqs)(struct pci_dev *dev);
20461 void (*restore_msi_irqs)(struct pci_dev *dev);
20462 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20463-};
20464+} __no_const;
20465
20466 struct IO_APIC_route_entry;
20467 struct io_apic_irq_attr;
20468@@ -203,7 +203,7 @@ struct x86_io_apic_ops {
20469 unsigned int destination, int vector,
20470 struct io_apic_irq_attr *attr);
20471 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20472-};
20473+} __no_const;
20474
20475 extern struct x86_init_ops x86_init;
20476 extern struct x86_cpuinit_ops x86_cpuinit;
20477diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20478index 358dcd3..23c0bf1 100644
20479--- a/arch/x86/include/asm/xen/page.h
20480+++ b/arch/x86/include/asm/xen/page.h
20481@@ -82,7 +82,7 @@ static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
20482 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
20483 * cases needing an extended handling.
20484 */
20485-static inline unsigned long __pfn_to_mfn(unsigned long pfn)
20486+static inline unsigned long __intentional_overflow(-1) __pfn_to_mfn(unsigned long pfn)
20487 {
20488 unsigned long mfn;
20489
20490diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20491index c9a6d68..cb57f42 100644
20492--- a/arch/x86/include/asm/xsave.h
20493+++ b/arch/x86/include/asm/xsave.h
20494@@ -223,12 +223,16 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20495 if (unlikely(err))
20496 return -EFAULT;
20497
20498+ pax_open_userland();
20499 __asm__ __volatile__(ASM_STAC "\n"
20500- "1:"XSAVE"\n"
20501+ "1:"
20502+ __copyuser_seg
20503+ XSAVE"\n"
20504 "2: " ASM_CLAC "\n"
20505 xstate_fault
20506 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20507 : "memory");
20508+ pax_close_userland();
20509 return err;
20510 }
20511
20512@@ -238,16 +242,20 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20513 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20514 {
20515 int err = 0;
20516- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20517+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20518 u32 lmask = mask;
20519 u32 hmask = mask >> 32;
20520
20521+ pax_open_userland();
20522 __asm__ __volatile__(ASM_STAC "\n"
20523- "1:"XRSTOR"\n"
20524+ "1:"
20525+ __copyuser_seg
20526+ XRSTOR"\n"
20527 "2: " ASM_CLAC "\n"
20528 xstate_fault
20529 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20530 : "memory"); /* memory required? */
20531+ pax_close_userland();
20532 return err;
20533 }
20534
20535diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20536index d993e33..8db1b18 100644
20537--- a/arch/x86/include/uapi/asm/e820.h
20538+++ b/arch/x86/include/uapi/asm/e820.h
20539@@ -58,7 +58,7 @@ struct e820map {
20540 #define ISA_START_ADDRESS 0xa0000
20541 #define ISA_END_ADDRESS 0x100000
20542
20543-#define BIOS_BEGIN 0x000a0000
20544+#define BIOS_BEGIN 0x000c0000
20545 #define BIOS_END 0x00100000
20546
20547 #define BIOS_ROM_BASE 0xffe00000
20548diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
20549index 7b0a55a..ad115bf 100644
20550--- a/arch/x86/include/uapi/asm/ptrace-abi.h
20551+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
20552@@ -49,7 +49,6 @@
20553 #define EFLAGS 144
20554 #define RSP 152
20555 #define SS 160
20556-#define ARGOFFSET R11
20557 #endif /* __ASSEMBLY__ */
20558
20559 /* top of stack page */
20560diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20561index cdb1b70..426434c 100644
20562--- a/arch/x86/kernel/Makefile
20563+++ b/arch/x86/kernel/Makefile
20564@@ -28,7 +28,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20565 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20566 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20567 obj-y += probe_roms.o
20568-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20569+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20570 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20571 obj-$(CONFIG_X86_64) += mcount_64.o
20572 obj-y += syscall_$(BITS).o vsyscall_gtod.o
20573diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20574index 803b684..68c64f1 100644
20575--- a/arch/x86/kernel/acpi/boot.c
20576+++ b/arch/x86/kernel/acpi/boot.c
20577@@ -1361,7 +1361,7 @@ static void __init acpi_reduced_hw_init(void)
20578 * If your system is blacklisted here, but you find that acpi=force
20579 * works for you, please contact linux-acpi@vger.kernel.org
20580 */
20581-static struct dmi_system_id __initdata acpi_dmi_table[] = {
20582+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20583 /*
20584 * Boxes that need ACPI disabled
20585 */
20586@@ -1436,7 +1436,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20587 };
20588
20589 /* second table for DMI checks that should run after early-quirks */
20590-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20591+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20592 /*
20593 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20594 * which includes some code which overrides all temperature
20595diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20596index d1daead..acd77e2 100644
20597--- a/arch/x86/kernel/acpi/sleep.c
20598+++ b/arch/x86/kernel/acpi/sleep.c
20599@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20600 #else /* CONFIG_64BIT */
20601 #ifdef CONFIG_SMP
20602 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20603+
20604+ pax_open_kernel();
20605 early_gdt_descr.address =
20606 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20607+ pax_close_kernel();
20608+
20609 initial_gs = per_cpu_offset(smp_processor_id());
20610 #endif
20611 initial_code = (unsigned long)wakeup_long64;
20612diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20613index 665c6b7..eae4d56 100644
20614--- a/arch/x86/kernel/acpi/wakeup_32.S
20615+++ b/arch/x86/kernel/acpi/wakeup_32.S
20616@@ -29,13 +29,11 @@ wakeup_pmode_return:
20617 # and restore the stack ... but you need gdt for this to work
20618 movl saved_context_esp, %esp
20619
20620- movl %cs:saved_magic, %eax
20621- cmpl $0x12345678, %eax
20622+ cmpl $0x12345678, saved_magic
20623 jne bogus_magic
20624
20625 # jump to place where we left off
20626- movl saved_eip, %eax
20627- jmp *%eax
20628+ jmp *(saved_eip)
20629
20630 bogus_magic:
20631 jmp bogus_magic
20632diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20633index 703130f..27a155d 100644
20634--- a/arch/x86/kernel/alternative.c
20635+++ b/arch/x86/kernel/alternative.c
20636@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20637 */
20638 for (a = start; a < end; a++) {
20639 instr = (u8 *)&a->instr_offset + a->instr_offset;
20640+
20641+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20642+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20643+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20644+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20645+#endif
20646+
20647 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20648 BUG_ON(a->replacementlen > a->instrlen);
20649 BUG_ON(a->instrlen > sizeof(insnbuf));
20650@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20651 add_nops(insnbuf + a->replacementlen,
20652 a->instrlen - a->replacementlen);
20653
20654+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20655+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20656+ instr = ktva_ktla(instr);
20657+#endif
20658+
20659 text_poke_early(instr, insnbuf, a->instrlen);
20660 }
20661 }
20662@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20663 for (poff = start; poff < end; poff++) {
20664 u8 *ptr = (u8 *)poff + *poff;
20665
20666+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20667+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20668+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20669+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20670+#endif
20671+
20672 if (!*poff || ptr < text || ptr >= text_end)
20673 continue;
20674 /* turn DS segment override prefix into lock prefix */
20675- if (*ptr == 0x3e)
20676+ if (*ktla_ktva(ptr) == 0x3e)
20677 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20678 }
20679 mutex_unlock(&text_mutex);
20680@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20681 for (poff = start; poff < end; poff++) {
20682 u8 *ptr = (u8 *)poff + *poff;
20683
20684+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20685+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20686+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20687+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20688+#endif
20689+
20690 if (!*poff || ptr < text || ptr >= text_end)
20691 continue;
20692 /* turn lock prefix into DS segment override prefix */
20693- if (*ptr == 0xf0)
20694+ if (*ktla_ktva(ptr) == 0xf0)
20695 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20696 }
20697 mutex_unlock(&text_mutex);
20698@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20699
20700 BUG_ON(p->len > MAX_PATCH_LEN);
20701 /* prep the buffer with the original instructions */
20702- memcpy(insnbuf, p->instr, p->len);
20703+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
20704 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
20705 (unsigned long)p->instr, p->len);
20706
20707@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
20708 if (!uniproc_patched || num_possible_cpus() == 1)
20709 free_init_pages("SMP alternatives",
20710 (unsigned long)__smp_locks,
20711- (unsigned long)__smp_locks_end);
20712+ PAGE_ALIGN((unsigned long)__smp_locks_end));
20713 #endif
20714
20715 apply_paravirt(__parainstructions, __parainstructions_end);
20716@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
20717 * instructions. And on the local CPU you need to be protected again NMI or MCE
20718 * handlers seeing an inconsistent instruction while you patch.
20719 */
20720-void *__init_or_module text_poke_early(void *addr, const void *opcode,
20721+void *__kprobes text_poke_early(void *addr, const void *opcode,
20722 size_t len)
20723 {
20724 unsigned long flags;
20725 local_irq_save(flags);
20726- memcpy(addr, opcode, len);
20727+
20728+ pax_open_kernel();
20729+ memcpy(ktla_ktva(addr), opcode, len);
20730 sync_core();
20731+ pax_close_kernel();
20732+
20733 local_irq_restore(flags);
20734 /* Could also do a CLFLUSH here to speed up CPU recovery; but
20735 that causes hangs on some VIA CPUs. */
20736@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
20737 */
20738 void *text_poke(void *addr, const void *opcode, size_t len)
20739 {
20740- unsigned long flags;
20741- char *vaddr;
20742+ unsigned char *vaddr = ktla_ktva(addr);
20743 struct page *pages[2];
20744- int i;
20745+ size_t i;
20746
20747 if (!core_kernel_text((unsigned long)addr)) {
20748- pages[0] = vmalloc_to_page(addr);
20749- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
20750+ pages[0] = vmalloc_to_page(vaddr);
20751+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
20752 } else {
20753- pages[0] = virt_to_page(addr);
20754+ pages[0] = virt_to_page(vaddr);
20755 WARN_ON(!PageReserved(pages[0]));
20756- pages[1] = virt_to_page(addr + PAGE_SIZE);
20757+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
20758 }
20759 BUG_ON(!pages[0]);
20760- local_irq_save(flags);
20761- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
20762- if (pages[1])
20763- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
20764- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
20765- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
20766- clear_fixmap(FIX_TEXT_POKE0);
20767- if (pages[1])
20768- clear_fixmap(FIX_TEXT_POKE1);
20769- local_flush_tlb();
20770- sync_core();
20771- /* Could also do a CLFLUSH here to speed up CPU recovery; but
20772- that causes hangs on some VIA CPUs. */
20773+ text_poke_early(addr, opcode, len);
20774 for (i = 0; i < len; i++)
20775- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
20776- local_irq_restore(flags);
20777+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
20778 return addr;
20779 }
20780
20781@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
20782 if (likely(!bp_patching_in_progress))
20783 return 0;
20784
20785- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
20786+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
20787 return 0;
20788
20789 /* set up the specified breakpoint handler */
20790@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
20791 */
20792 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
20793 {
20794- unsigned char int3 = 0xcc;
20795+ const unsigned char int3 = 0xcc;
20796
20797 bp_int3_handler = handler;
20798 bp_int3_addr = (u8 *)addr + sizeof(int3);
20799diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
20800index ad3639a..bd4253c 100644
20801--- a/arch/x86/kernel/apic/apic.c
20802+++ b/arch/x86/kernel/apic/apic.c
20803@@ -171,7 +171,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
20804 /*
20805 * Debug level, exported for io_apic.c
20806 */
20807-unsigned int apic_verbosity;
20808+int apic_verbosity;
20809
20810 int pic_mode;
20811
20812@@ -1918,7 +1918,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
20813 apic_write(APIC_ESR, 0);
20814 v = apic_read(APIC_ESR);
20815 ack_APIC_irq();
20816- atomic_inc(&irq_err_count);
20817+ atomic_inc_unchecked(&irq_err_count);
20818
20819 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
20820 smp_processor_id(), v);
20821diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
20822index de918c4..32eed23 100644
20823--- a/arch/x86/kernel/apic/apic_flat_64.c
20824+++ b/arch/x86/kernel/apic/apic_flat_64.c
20825@@ -154,7 +154,7 @@ static int flat_probe(void)
20826 return 1;
20827 }
20828
20829-static struct apic apic_flat = {
20830+static struct apic apic_flat __read_only = {
20831 .name = "flat",
20832 .probe = flat_probe,
20833 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
20834@@ -260,7 +260,7 @@ static int physflat_probe(void)
20835 return 0;
20836 }
20837
20838-static struct apic apic_physflat = {
20839+static struct apic apic_physflat __read_only = {
20840
20841 .name = "physical flat",
20842 .probe = physflat_probe,
20843diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
20844index b205cdb..d8503ff 100644
20845--- a/arch/x86/kernel/apic/apic_noop.c
20846+++ b/arch/x86/kernel/apic/apic_noop.c
20847@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
20848 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
20849 }
20850
20851-struct apic apic_noop = {
20852+struct apic apic_noop __read_only = {
20853 .name = "noop",
20854 .probe = noop_probe,
20855 .acpi_madt_oem_check = NULL,
20856diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
20857index c4a8d63..fe893ac 100644
20858--- a/arch/x86/kernel/apic/bigsmp_32.c
20859+++ b/arch/x86/kernel/apic/bigsmp_32.c
20860@@ -147,7 +147,7 @@ static int probe_bigsmp(void)
20861 return dmi_bigsmp;
20862 }
20863
20864-static struct apic apic_bigsmp = {
20865+static struct apic apic_bigsmp __read_only = {
20866
20867 .name = "bigsmp",
20868 .probe = probe_bigsmp,
20869diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
20870index f4dc246..fbab133 100644
20871--- a/arch/x86/kernel/apic/io_apic.c
20872+++ b/arch/x86/kernel/apic/io_apic.c
20873@@ -1862,7 +1862,7 @@ int native_ioapic_set_affinity(struct irq_data *data,
20874 return ret;
20875 }
20876
20877-atomic_t irq_mis_count;
20878+atomic_unchecked_t irq_mis_count;
20879
20880 #ifdef CONFIG_GENERIC_PENDING_IRQ
20881 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
20882@@ -2003,7 +2003,7 @@ static void ack_ioapic_level(struct irq_data *data)
20883 * at the cpu.
20884 */
20885 if (!(v & (1 << (i & 0x1f)))) {
20886- atomic_inc(&irq_mis_count);
20887+ atomic_inc_unchecked(&irq_mis_count);
20888
20889 eoi_ioapic_irq(irq, cfg);
20890 }
20891@@ -2011,7 +2011,7 @@ static void ack_ioapic_level(struct irq_data *data)
20892 ioapic_irqd_unmask(data, cfg, masked);
20893 }
20894
20895-static struct irq_chip ioapic_chip __read_mostly = {
20896+static struct irq_chip ioapic_chip = {
20897 .name = "IO-APIC",
20898 .irq_startup = startup_ioapic_irq,
20899 .irq_mask = mask_ioapic_irq,
20900@@ -2070,7 +2070,7 @@ static void ack_lapic_irq(struct irq_data *data)
20901 ack_APIC_irq();
20902 }
20903
20904-static struct irq_chip lapic_chip __read_mostly = {
20905+static struct irq_chip lapic_chip = {
20906 .name = "local-APIC",
20907 .irq_mask = mask_lapic_irq,
20908 .irq_unmask = unmask_lapic_irq,
20909diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
20910index bda4886..f9c7195 100644
20911--- a/arch/x86/kernel/apic/probe_32.c
20912+++ b/arch/x86/kernel/apic/probe_32.c
20913@@ -72,7 +72,7 @@ static int probe_default(void)
20914 return 1;
20915 }
20916
20917-static struct apic apic_default = {
20918+static struct apic apic_default __read_only = {
20919
20920 .name = "default",
20921 .probe = probe_default,
20922diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
20923index 6cedd79..023ff8e 100644
20924--- a/arch/x86/kernel/apic/vector.c
20925+++ b/arch/x86/kernel/apic/vector.c
20926@@ -21,7 +21,7 @@
20927
20928 static DEFINE_RAW_SPINLOCK(vector_lock);
20929
20930-void lock_vector_lock(void)
20931+void lock_vector_lock(void) __acquires(vector_lock)
20932 {
20933 /* Used to the online set of cpus does not change
20934 * during assign_irq_vector.
20935@@ -29,7 +29,7 @@ void lock_vector_lock(void)
20936 raw_spin_lock(&vector_lock);
20937 }
20938
20939-void unlock_vector_lock(void)
20940+void unlock_vector_lock(void) __releases(vector_lock)
20941 {
20942 raw_spin_unlock(&vector_lock);
20943 }
20944diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
20945index e658f21..b695a1a 100644
20946--- a/arch/x86/kernel/apic/x2apic_cluster.c
20947+++ b/arch/x86/kernel/apic/x2apic_cluster.c
20948@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
20949 return notifier_from_errno(err);
20950 }
20951
20952-static struct notifier_block __refdata x2apic_cpu_notifier = {
20953+static struct notifier_block x2apic_cpu_notifier = {
20954 .notifier_call = update_clusterinfo,
20955 };
20956
20957@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
20958 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
20959 }
20960
20961-static struct apic apic_x2apic_cluster = {
20962+static struct apic apic_x2apic_cluster __read_only = {
20963
20964 .name = "cluster x2apic",
20965 .probe = x2apic_cluster_probe,
20966diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
20967index 6fae733..5ca17af 100644
20968--- a/arch/x86/kernel/apic/x2apic_phys.c
20969+++ b/arch/x86/kernel/apic/x2apic_phys.c
20970@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
20971 return apic == &apic_x2apic_phys;
20972 }
20973
20974-static struct apic apic_x2apic_phys = {
20975+static struct apic apic_x2apic_phys __read_only = {
20976
20977 .name = "physical x2apic",
20978 .probe = x2apic_phys_probe,
20979diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
20980index 8e9dcfd..c61b3e4 100644
20981--- a/arch/x86/kernel/apic/x2apic_uv_x.c
20982+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
20983@@ -348,7 +348,7 @@ static int uv_probe(void)
20984 return apic == &apic_x2apic_uv_x;
20985 }
20986
20987-static struct apic __refdata apic_x2apic_uv_x = {
20988+static struct apic apic_x2apic_uv_x __read_only = {
20989
20990 .name = "UV large system",
20991 .probe = uv_probe,
20992diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
20993index 927ec92..de68f32 100644
20994--- a/arch/x86/kernel/apm_32.c
20995+++ b/arch/x86/kernel/apm_32.c
20996@@ -432,7 +432,7 @@ static DEFINE_MUTEX(apm_mutex);
20997 * This is for buggy BIOS's that refer to (real mode) segment 0x40
20998 * even though they are called in protected mode.
20999 */
21000-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
21001+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
21002 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
21003
21004 static const char driver_version[] = "1.16ac"; /* no spaces */
21005@@ -610,7 +610,10 @@ static long __apm_bios_call(void *_call)
21006 BUG_ON(cpu != 0);
21007 gdt = get_cpu_gdt_table(cpu);
21008 save_desc_40 = gdt[0x40 / 8];
21009+
21010+ pax_open_kernel();
21011 gdt[0x40 / 8] = bad_bios_desc;
21012+ pax_close_kernel();
21013
21014 apm_irq_save(flags);
21015 APM_DO_SAVE_SEGS;
21016@@ -619,7 +622,11 @@ static long __apm_bios_call(void *_call)
21017 &call->esi);
21018 APM_DO_RESTORE_SEGS;
21019 apm_irq_restore(flags);
21020+
21021+ pax_open_kernel();
21022 gdt[0x40 / 8] = save_desc_40;
21023+ pax_close_kernel();
21024+
21025 put_cpu();
21026
21027 return call->eax & 0xff;
21028@@ -686,7 +693,10 @@ static long __apm_bios_call_simple(void *_call)
21029 BUG_ON(cpu != 0);
21030 gdt = get_cpu_gdt_table(cpu);
21031 save_desc_40 = gdt[0x40 / 8];
21032+
21033+ pax_open_kernel();
21034 gdt[0x40 / 8] = bad_bios_desc;
21035+ pax_close_kernel();
21036
21037 apm_irq_save(flags);
21038 APM_DO_SAVE_SEGS;
21039@@ -694,7 +704,11 @@ static long __apm_bios_call_simple(void *_call)
21040 &call->eax);
21041 APM_DO_RESTORE_SEGS;
21042 apm_irq_restore(flags);
21043+
21044+ pax_open_kernel();
21045 gdt[0x40 / 8] = save_desc_40;
21046+ pax_close_kernel();
21047+
21048 put_cpu();
21049 return error;
21050 }
21051@@ -2039,7 +2053,7 @@ static int __init swab_apm_power_in_minutes(const struct dmi_system_id *d)
21052 return 0;
21053 }
21054
21055-static struct dmi_system_id __initdata apm_dmi_table[] = {
21056+static const struct dmi_system_id __initconst apm_dmi_table[] = {
21057 {
21058 print_if_true,
21059 KERN_WARNING "IBM T23 - BIOS 1.03b+ and controller firmware 1.02+ may be needed for Linux APM.",
21060@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
21061 * code to that CPU.
21062 */
21063 gdt = get_cpu_gdt_table(0);
21064+
21065+ pax_open_kernel();
21066 set_desc_base(&gdt[APM_CS >> 3],
21067 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21068 set_desc_base(&gdt[APM_CS_16 >> 3],
21069 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21070 set_desc_base(&gdt[APM_DS >> 3],
21071 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21072+ pax_close_kernel();
21073
21074 proc_create("apm", 0, NULL, &apm_file_ops);
21075
21076diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21077index 9f6b934..cf5ffb3 100644
21078--- a/arch/x86/kernel/asm-offsets.c
21079+++ b/arch/x86/kernel/asm-offsets.c
21080@@ -32,6 +32,8 @@ void common(void) {
21081 OFFSET(TI_flags, thread_info, flags);
21082 OFFSET(TI_status, thread_info, status);
21083 OFFSET(TI_addr_limit, thread_info, addr_limit);
21084+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21085+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21086
21087 BLANK();
21088 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21089@@ -52,8 +54,26 @@ void common(void) {
21090 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21091 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21092 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21093+
21094+#ifdef CONFIG_PAX_KERNEXEC
21095+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21096 #endif
21097
21098+#ifdef CONFIG_PAX_MEMORY_UDEREF
21099+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21100+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21101+#ifdef CONFIG_X86_64
21102+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21103+#endif
21104+#endif
21105+
21106+#endif
21107+
21108+ BLANK();
21109+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21110+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21111+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21112+
21113 #ifdef CONFIG_XEN
21114 BLANK();
21115 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
21116diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21117index fdcbb4d..036dd93 100644
21118--- a/arch/x86/kernel/asm-offsets_64.c
21119+++ b/arch/x86/kernel/asm-offsets_64.c
21120@@ -80,6 +80,7 @@ int main(void)
21121 BLANK();
21122 #undef ENTRY
21123
21124+ DEFINE(TSS_size, sizeof(struct tss_struct));
21125 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21126 BLANK();
21127
21128diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21129index 80091ae..0c5184f 100644
21130--- a/arch/x86/kernel/cpu/Makefile
21131+++ b/arch/x86/kernel/cpu/Makefile
21132@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21133 CFLAGS_REMOVE_perf_event.o = -pg
21134 endif
21135
21136-# Make sure load_percpu_segment has no stackprotector
21137-nostackp := $(call cc-option, -fno-stack-protector)
21138-CFLAGS_common.o := $(nostackp)
21139-
21140 obj-y := intel_cacheinfo.o scattered.o topology.o
21141 obj-y += common.o
21142 obj-y += rdrand.o
21143diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21144index a220239..607fc38 100644
21145--- a/arch/x86/kernel/cpu/amd.c
21146+++ b/arch/x86/kernel/cpu/amd.c
21147@@ -717,7 +717,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21148 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21149 {
21150 /* AMD errata T13 (order #21922) */
21151- if ((c->x86 == 6)) {
21152+ if (c->x86 == 6) {
21153 /* Duron Rev A0 */
21154 if (c->x86_model == 3 && c->x86_mask == 0)
21155 size = 64;
21156diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21157index 2346c95..c061472 100644
21158--- a/arch/x86/kernel/cpu/common.c
21159+++ b/arch/x86/kernel/cpu/common.c
21160@@ -91,60 +91,6 @@ static const struct cpu_dev default_cpu = {
21161
21162 static const struct cpu_dev *this_cpu = &default_cpu;
21163
21164-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21165-#ifdef CONFIG_X86_64
21166- /*
21167- * We need valid kernel segments for data and code in long mode too
21168- * IRET will check the segment types kkeil 2000/10/28
21169- * Also sysret mandates a special GDT layout
21170- *
21171- * TLS descriptors are currently at a different place compared to i386.
21172- * Hopefully nobody expects them at a fixed place (Wine?)
21173- */
21174- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21175- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21176- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21177- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21178- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21179- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21180-#else
21181- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21182- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21183- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21184- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21185- /*
21186- * Segments used for calling PnP BIOS have byte granularity.
21187- * They code segments and data segments have fixed 64k limits,
21188- * the transfer segment sizes are set at run time.
21189- */
21190- /* 32-bit code */
21191- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21192- /* 16-bit code */
21193- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21194- /* 16-bit data */
21195- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21196- /* 16-bit data */
21197- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21198- /* 16-bit data */
21199- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21200- /*
21201- * The APM segments have byte granularity and their bases
21202- * are set at run time. All have 64k limits.
21203- */
21204- /* 32-bit code */
21205- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21206- /* 16-bit code */
21207- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21208- /* data */
21209- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21210-
21211- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21212- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21213- GDT_STACK_CANARY_INIT
21214-#endif
21215-} };
21216-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21217-
21218 static int __init x86_xsave_setup(char *s)
21219 {
21220 if (strlen(s))
21221@@ -306,6 +252,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21222 }
21223 }
21224
21225+#ifdef CONFIG_X86_64
21226+static __init int setup_disable_pcid(char *arg)
21227+{
21228+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21229+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21230+
21231+#ifdef CONFIG_PAX_MEMORY_UDEREF
21232+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21233+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21234+#endif
21235+
21236+ return 1;
21237+}
21238+__setup("nopcid", setup_disable_pcid);
21239+
21240+static void setup_pcid(struct cpuinfo_x86 *c)
21241+{
21242+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21243+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
21244+
21245+#ifdef CONFIG_PAX_MEMORY_UDEREF
21246+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
21247+ pax_open_kernel();
21248+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21249+ pax_close_kernel();
21250+ printk("PAX: slow and weak UDEREF enabled\n");
21251+ } else
21252+ printk("PAX: UDEREF disabled\n");
21253+#endif
21254+
21255+ return;
21256+ }
21257+
21258+ printk("PAX: PCID detected\n");
21259+ cr4_set_bits(X86_CR4_PCIDE);
21260+
21261+#ifdef CONFIG_PAX_MEMORY_UDEREF
21262+ pax_open_kernel();
21263+ clone_pgd_mask = ~(pgdval_t)0UL;
21264+ pax_close_kernel();
21265+ if (pax_user_shadow_base)
21266+ printk("PAX: weak UDEREF enabled\n");
21267+ else {
21268+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
21269+ printk("PAX: strong UDEREF enabled\n");
21270+ }
21271+#endif
21272+
21273+ if (cpu_has(c, X86_FEATURE_INVPCID))
21274+ printk("PAX: INVPCID detected\n");
21275+}
21276+#endif
21277+
21278 /*
21279 * Some CPU features depend on higher CPUID levels, which may not always
21280 * be available due to CPUID level capping or broken virtualization
21281@@ -406,7 +405,7 @@ void switch_to_new_gdt(int cpu)
21282 {
21283 struct desc_ptr gdt_descr;
21284
21285- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
21286+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
21287 gdt_descr.size = GDT_SIZE - 1;
21288 load_gdt(&gdt_descr);
21289 /* Reload the per-cpu base */
21290@@ -897,6 +896,20 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21291 setup_smep(c);
21292 setup_smap(c);
21293
21294+#ifdef CONFIG_X86_32
21295+#ifdef CONFIG_PAX_PAGEEXEC
21296+ if (!(__supported_pte_mask & _PAGE_NX))
21297+ clear_cpu_cap(c, X86_FEATURE_PSE);
21298+#endif
21299+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21300+ clear_cpu_cap(c, X86_FEATURE_SEP);
21301+#endif
21302+#endif
21303+
21304+#ifdef CONFIG_X86_64
21305+ setup_pcid(c);
21306+#endif
21307+
21308 /*
21309 * The vendor-specific functions might have changed features.
21310 * Now we do "generic changes."
21311@@ -979,7 +992,7 @@ static void syscall32_cpu_init(void)
21312 void enable_sep_cpu(void)
21313 {
21314 int cpu = get_cpu();
21315- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21316+ struct tss_struct *tss = init_tss + cpu;
21317
21318 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21319 put_cpu();
21320@@ -1117,14 +1130,16 @@ static __init int setup_disablecpuid(char *arg)
21321 }
21322 __setup("clearcpuid=", setup_disablecpuid);
21323
21324+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
21325+EXPORT_PER_CPU_SYMBOL(current_tinfo);
21326+
21327 DEFINE_PER_CPU(unsigned long, kernel_stack) =
21328- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
21329+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
21330 EXPORT_PER_CPU_SYMBOL(kernel_stack);
21331
21332 #ifdef CONFIG_X86_64
21333-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21334-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
21335- (unsigned long) debug_idt_table };
21336+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21337+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
21338
21339 DEFINE_PER_CPU_FIRST(union irq_stack_union,
21340 irq_stack_union) __aligned(PAGE_SIZE) __visible;
21341@@ -1307,7 +1322,7 @@ void cpu_init(void)
21342 */
21343 load_ucode_ap();
21344
21345- t = &per_cpu(init_tss, cpu);
21346+ t = init_tss + cpu;
21347 oist = &per_cpu(orig_ist, cpu);
21348
21349 #ifdef CONFIG_NUMA
21350@@ -1339,7 +1354,6 @@ void cpu_init(void)
21351 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21352 barrier();
21353
21354- x86_configure_nx();
21355 x2apic_setup();
21356
21357 /*
21358@@ -1391,7 +1405,7 @@ void cpu_init(void)
21359 {
21360 int cpu = smp_processor_id();
21361 struct task_struct *curr = current;
21362- struct tss_struct *t = &per_cpu(init_tss, cpu);
21363+ struct tss_struct *t = init_tss + cpu;
21364 struct thread_struct *thread = &curr->thread;
21365
21366 wait_for_master_cpu(cpu);
21367diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21368index 6596433..1ad6eaf 100644
21369--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21370+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21371@@ -1024,6 +1024,22 @@ static struct attribute *default_attrs[] = {
21372 };
21373
21374 #ifdef CONFIG_AMD_NB
21375+static struct attribute *default_attrs_amd_nb[] = {
21376+ &type.attr,
21377+ &level.attr,
21378+ &coherency_line_size.attr,
21379+ &physical_line_partition.attr,
21380+ &ways_of_associativity.attr,
21381+ &number_of_sets.attr,
21382+ &size.attr,
21383+ &shared_cpu_map.attr,
21384+ &shared_cpu_list.attr,
21385+ NULL,
21386+ NULL,
21387+ NULL,
21388+ NULL
21389+};
21390+
21391 static struct attribute **amd_l3_attrs(void)
21392 {
21393 static struct attribute **attrs;
21394@@ -1034,18 +1050,7 @@ static struct attribute **amd_l3_attrs(void)
21395
21396 n = ARRAY_SIZE(default_attrs);
21397
21398- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21399- n += 2;
21400-
21401- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21402- n += 1;
21403-
21404- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
21405- if (attrs == NULL)
21406- return attrs = default_attrs;
21407-
21408- for (n = 0; default_attrs[n]; n++)
21409- attrs[n] = default_attrs[n];
21410+ attrs = default_attrs_amd_nb;
21411
21412 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
21413 attrs[n++] = &cache_disable_0.attr;
21414@@ -1096,6 +1101,13 @@ static struct kobj_type ktype_cache = {
21415 .default_attrs = default_attrs,
21416 };
21417
21418+#ifdef CONFIG_AMD_NB
21419+static struct kobj_type ktype_cache_amd_nb = {
21420+ .sysfs_ops = &sysfs_ops,
21421+ .default_attrs = default_attrs_amd_nb,
21422+};
21423+#endif
21424+
21425 static struct kobj_type ktype_percpu_entry = {
21426 .sysfs_ops = &sysfs_ops,
21427 };
21428@@ -1161,20 +1173,26 @@ static int cache_add_dev(struct device *dev)
21429 return retval;
21430 }
21431
21432+#ifdef CONFIG_AMD_NB
21433+ amd_l3_attrs();
21434+#endif
21435+
21436 for (i = 0; i < num_cache_leaves; i++) {
21437+ struct kobj_type *ktype;
21438+
21439 this_object = INDEX_KOBJECT_PTR(cpu, i);
21440 this_object->cpu = cpu;
21441 this_object->index = i;
21442
21443 this_leaf = CPUID4_INFO_IDX(cpu, i);
21444
21445- ktype_cache.default_attrs = default_attrs;
21446+ ktype = &ktype_cache;
21447 #ifdef CONFIG_AMD_NB
21448 if (this_leaf->base.nb)
21449- ktype_cache.default_attrs = amd_l3_attrs();
21450+ ktype = &ktype_cache_amd_nb;
21451 #endif
21452 retval = kobject_init_and_add(&(this_object->kobj),
21453- &ktype_cache,
21454+ ktype,
21455 per_cpu(ici_cache_kobject, cpu),
21456 "index%1lu", i);
21457 if (unlikely(retval)) {
21458diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21459index 11dd8f2..fd88f68 100644
21460--- a/arch/x86/kernel/cpu/mcheck/mce.c
21461+++ b/arch/x86/kernel/cpu/mcheck/mce.c
21462@@ -47,6 +47,7 @@
21463 #include <asm/tlbflush.h>
21464 #include <asm/mce.h>
21465 #include <asm/msr.h>
21466+#include <asm/local.h>
21467
21468 #include "mce-internal.h"
21469
21470@@ -258,7 +259,7 @@ static void print_mce(struct mce *m)
21471 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21472 m->cs, m->ip);
21473
21474- if (m->cs == __KERNEL_CS)
21475+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21476 print_symbol("{%s}", m->ip);
21477 pr_cont("\n");
21478 }
21479@@ -291,10 +292,10 @@ static void print_mce(struct mce *m)
21480
21481 #define PANIC_TIMEOUT 5 /* 5 seconds */
21482
21483-static atomic_t mce_panicked;
21484+static atomic_unchecked_t mce_panicked;
21485
21486 static int fake_panic;
21487-static atomic_t mce_fake_panicked;
21488+static atomic_unchecked_t mce_fake_panicked;
21489
21490 /* Panic in progress. Enable interrupts and wait for final IPI */
21491 static void wait_for_panic(void)
21492@@ -318,7 +319,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21493 /*
21494 * Make sure only one CPU runs in machine check panic
21495 */
21496- if (atomic_inc_return(&mce_panicked) > 1)
21497+ if (atomic_inc_return_unchecked(&mce_panicked) > 1)
21498 wait_for_panic();
21499 barrier();
21500
21501@@ -326,7 +327,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21502 console_verbose();
21503 } else {
21504 /* Don't log too much for fake panic */
21505- if (atomic_inc_return(&mce_fake_panicked) > 1)
21506+ if (atomic_inc_return_unchecked(&mce_fake_panicked) > 1)
21507 return;
21508 }
21509 /* First print corrected ones that are still unlogged */
21510@@ -365,7 +366,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
21511 if (!fake_panic) {
21512 if (panic_timeout == 0)
21513 panic_timeout = mca_cfg.panic_timeout;
21514- panic(msg);
21515+ panic("%s", msg);
21516 } else
21517 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21518 }
21519@@ -746,7 +747,7 @@ static int mce_timed_out(u64 *t, const char *msg)
21520 * might have been modified by someone else.
21521 */
21522 rmb();
21523- if (atomic_read(&mce_panicked))
21524+ if (atomic_read_unchecked(&mce_panicked))
21525 wait_for_panic();
21526 if (!mca_cfg.monarch_timeout)
21527 goto out;
21528@@ -1672,7 +1673,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21529 }
21530
21531 /* Call the installed machine check handler for this CPU setup. */
21532-void (*machine_check_vector)(struct pt_regs *, long error_code) =
21533+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21534 unexpected_machine_check;
21535
21536 /*
21537@@ -1695,7 +1696,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21538 return;
21539 }
21540
21541+ pax_open_kernel();
21542 machine_check_vector = do_machine_check;
21543+ pax_close_kernel();
21544
21545 __mcheck_cpu_init_generic();
21546 __mcheck_cpu_init_vendor(c);
21547@@ -1709,7 +1712,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21548 */
21549
21550 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21551-static int mce_chrdev_open_count; /* #times opened */
21552+static local_t mce_chrdev_open_count; /* #times opened */
21553 static int mce_chrdev_open_exclu; /* already open exclusive? */
21554
21555 static int mce_chrdev_open(struct inode *inode, struct file *file)
21556@@ -1717,7 +1720,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21557 spin_lock(&mce_chrdev_state_lock);
21558
21559 if (mce_chrdev_open_exclu ||
21560- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21561+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21562 spin_unlock(&mce_chrdev_state_lock);
21563
21564 return -EBUSY;
21565@@ -1725,7 +1728,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21566
21567 if (file->f_flags & O_EXCL)
21568 mce_chrdev_open_exclu = 1;
21569- mce_chrdev_open_count++;
21570+ local_inc(&mce_chrdev_open_count);
21571
21572 spin_unlock(&mce_chrdev_state_lock);
21573
21574@@ -1736,7 +1739,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21575 {
21576 spin_lock(&mce_chrdev_state_lock);
21577
21578- mce_chrdev_open_count--;
21579+ local_dec(&mce_chrdev_open_count);
21580 mce_chrdev_open_exclu = 0;
21581
21582 spin_unlock(&mce_chrdev_state_lock);
21583@@ -2411,7 +2414,7 @@ static __init void mce_init_banks(void)
21584
21585 for (i = 0; i < mca_cfg.banks; i++) {
21586 struct mce_bank *b = &mce_banks[i];
21587- struct device_attribute *a = &b->attr;
21588+ device_attribute_no_const *a = &b->attr;
21589
21590 sysfs_attr_init(&a->attr);
21591 a->attr.name = b->attrname;
21592@@ -2518,7 +2521,7 @@ struct dentry *mce_get_debugfs_dir(void)
21593 static void mce_reset(void)
21594 {
21595 cpu_missing = 0;
21596- atomic_set(&mce_fake_panicked, 0);
21597+ atomic_set_unchecked(&mce_fake_panicked, 0);
21598 atomic_set(&mce_executing, 0);
21599 atomic_set(&mce_callin, 0);
21600 atomic_set(&global_nwo, 0);
21601diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21602index 737b0ad..09ec66e 100644
21603--- a/arch/x86/kernel/cpu/mcheck/p5.c
21604+++ b/arch/x86/kernel/cpu/mcheck/p5.c
21605@@ -12,6 +12,7 @@
21606 #include <asm/tlbflush.h>
21607 #include <asm/mce.h>
21608 #include <asm/msr.h>
21609+#include <asm/pgtable.h>
21610
21611 /* By default disabled */
21612 int mce_p5_enabled __read_mostly;
21613@@ -55,7 +56,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21614 if (!cpu_has(c, X86_FEATURE_MCE))
21615 return;
21616
21617+ pax_open_kernel();
21618 machine_check_vector = pentium_machine_check;
21619+ pax_close_kernel();
21620 /* Make sure the vector pointer is visible before we enable MCEs: */
21621 wmb();
21622
21623diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21624index 44f1382..315b292 100644
21625--- a/arch/x86/kernel/cpu/mcheck/winchip.c
21626+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21627@@ -11,6 +11,7 @@
21628 #include <asm/tlbflush.h>
21629 #include <asm/mce.h>
21630 #include <asm/msr.h>
21631+#include <asm/pgtable.h>
21632
21633 /* Machine check handler for WinChip C6: */
21634 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21635@@ -28,7 +29,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21636 {
21637 u32 lo, hi;
21638
21639+ pax_open_kernel();
21640 machine_check_vector = winchip_machine_check;
21641+ pax_close_kernel();
21642 /* Make sure the vector pointer is visible before we enable MCEs: */
21643 wmb();
21644
21645diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
21646index 36a8361..e7058c2 100644
21647--- a/arch/x86/kernel/cpu/microcode/core.c
21648+++ b/arch/x86/kernel/cpu/microcode/core.c
21649@@ -518,7 +518,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21650 return NOTIFY_OK;
21651 }
21652
21653-static struct notifier_block __refdata mc_cpu_notifier = {
21654+static struct notifier_block mc_cpu_notifier = {
21655 .notifier_call = mc_cpu_callback,
21656 };
21657
21658diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
21659index 746e7fd..8dc677e 100644
21660--- a/arch/x86/kernel/cpu/microcode/intel.c
21661+++ b/arch/x86/kernel/cpu/microcode/intel.c
21662@@ -298,13 +298,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21663
21664 static int get_ucode_user(void *to, const void *from, size_t n)
21665 {
21666- return copy_from_user(to, from, n);
21667+ return copy_from_user(to, (const void __force_user *)from, n);
21668 }
21669
21670 static enum ucode_state
21671 request_microcode_user(int cpu, const void __user *buf, size_t size)
21672 {
21673- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21674+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21675 }
21676
21677 static void microcode_fini_cpu(int cpu)
21678diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21679index ea5f363..cb0e905 100644
21680--- a/arch/x86/kernel/cpu/mtrr/main.c
21681+++ b/arch/x86/kernel/cpu/mtrr/main.c
21682@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21683 u64 size_or_mask, size_and_mask;
21684 static bool mtrr_aps_delayed_init;
21685
21686-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21687+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21688
21689 const struct mtrr_ops *mtrr_if;
21690
21691diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21692index df5e41f..816c719 100644
21693--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21694+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21695@@ -25,7 +25,7 @@ struct mtrr_ops {
21696 int (*validate_add_page)(unsigned long base, unsigned long size,
21697 unsigned int type);
21698 int (*have_wrcomb)(void);
21699-};
21700+} __do_const;
21701
21702 extern int generic_get_free_region(unsigned long base, unsigned long size,
21703 int replace_reg);
21704diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21705index b71a7f8..534af0e 100644
21706--- a/arch/x86/kernel/cpu/perf_event.c
21707+++ b/arch/x86/kernel/cpu/perf_event.c
21708@@ -1376,7 +1376,7 @@ static void __init pmu_check_apic(void)
21709
21710 }
21711
21712-static struct attribute_group x86_pmu_format_group = {
21713+static attribute_group_no_const x86_pmu_format_group = {
21714 .name = "format",
21715 .attrs = NULL,
21716 };
21717@@ -1475,7 +1475,7 @@ static struct attribute *events_attr[] = {
21718 NULL,
21719 };
21720
21721-static struct attribute_group x86_pmu_events_group = {
21722+static attribute_group_no_const x86_pmu_events_group = {
21723 .name = "events",
21724 .attrs = events_attr,
21725 };
21726@@ -2037,7 +2037,7 @@ static unsigned long get_segment_base(unsigned int segment)
21727 if (idx > GDT_ENTRIES)
21728 return 0;
21729
21730- desc = raw_cpu_ptr(gdt_page.gdt);
21731+ desc = get_cpu_gdt_table(smp_processor_id());
21732 }
21733
21734 return get_desc_base(desc + idx);
21735@@ -2127,7 +2127,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
21736 break;
21737
21738 perf_callchain_store(entry, frame.return_address);
21739- fp = frame.next_frame;
21740+ fp = (const void __force_user *)frame.next_frame;
21741 }
21742 }
21743
21744diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21745index 97242a9..cf9c30e 100644
21746--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21747+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21748@@ -402,7 +402,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
21749 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
21750 {
21751 struct attribute **attrs;
21752- struct attribute_group *attr_group;
21753+ attribute_group_no_const *attr_group;
21754 int i = 0, j;
21755
21756 while (amd_iommu_v2_event_descs[i].attr.attr.name)
21757diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
21758index 2589906..1ca1000 100644
21759--- a/arch/x86/kernel/cpu/perf_event_intel.c
21760+++ b/arch/x86/kernel/cpu/perf_event_intel.c
21761@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
21762 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
21763
21764 if (boot_cpu_has(X86_FEATURE_PDCM)) {
21765- u64 capabilities;
21766+ u64 capabilities = x86_pmu.intel_cap.capabilities;
21767
21768- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
21769- x86_pmu.intel_cap.capabilities = capabilities;
21770+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
21771+ x86_pmu.intel_cap.capabilities = capabilities;
21772 }
21773
21774 intel_ds_init();
21775diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21776index 76d8cbe..e5f9681 100644
21777--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21778+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21779@@ -465,7 +465,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
21780 NULL,
21781 };
21782
21783-static struct attribute_group rapl_pmu_events_group = {
21784+static attribute_group_no_const rapl_pmu_events_group __read_only = {
21785 .name = "events",
21786 .attrs = NULL, /* patched at runtime */
21787 };
21788diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21789index c635b8b..b78835e 100644
21790--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21791+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21792@@ -733,7 +733,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
21793 static int __init uncore_type_init(struct intel_uncore_type *type)
21794 {
21795 struct intel_uncore_pmu *pmus;
21796- struct attribute_group *attr_group;
21797+ attribute_group_no_const *attr_group;
21798 struct attribute **attrs;
21799 int i, j;
21800
21801diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21802index 6c8c1e7..515b98a 100644
21803--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21804+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21805@@ -114,7 +114,7 @@ struct intel_uncore_box {
21806 struct uncore_event_desc {
21807 struct kobj_attribute attr;
21808 const char *config;
21809-};
21810+} __do_const;
21811
21812 ssize_t uncore_event_show(struct kobject *kobj,
21813 struct kobj_attribute *attr, char *buf);
21814diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
21815index 83741a7..bd3507d 100644
21816--- a/arch/x86/kernel/cpuid.c
21817+++ b/arch/x86/kernel/cpuid.c
21818@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
21819 return notifier_from_errno(err);
21820 }
21821
21822-static struct notifier_block __refdata cpuid_class_cpu_notifier =
21823+static struct notifier_block cpuid_class_cpu_notifier =
21824 {
21825 .notifier_call = cpuid_class_cpu_callback,
21826 };
21827diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
21828index aceb2f9..c76d3e3 100644
21829--- a/arch/x86/kernel/crash.c
21830+++ b/arch/x86/kernel/crash.c
21831@@ -105,7 +105,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
21832 #ifdef CONFIG_X86_32
21833 struct pt_regs fixed_regs;
21834
21835- if (!user_mode_vm(regs)) {
21836+ if (!user_mode(regs)) {
21837 crash_fixup_ss_esp(&fixed_regs, regs);
21838 regs = &fixed_regs;
21839 }
21840diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
21841index afa64ad..dce67dd 100644
21842--- a/arch/x86/kernel/crash_dump_64.c
21843+++ b/arch/x86/kernel/crash_dump_64.c
21844@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
21845 return -ENOMEM;
21846
21847 if (userbuf) {
21848- if (copy_to_user(buf, vaddr + offset, csize)) {
21849+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
21850 iounmap(vaddr);
21851 return -EFAULT;
21852 }
21853diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
21854index f6dfd93..892ade4 100644
21855--- a/arch/x86/kernel/doublefault.c
21856+++ b/arch/x86/kernel/doublefault.c
21857@@ -12,7 +12,7 @@
21858
21859 #define DOUBLEFAULT_STACKSIZE (1024)
21860 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
21861-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
21862+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
21863
21864 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
21865
21866@@ -22,7 +22,7 @@ static void doublefault_fn(void)
21867 unsigned long gdt, tss;
21868
21869 native_store_gdt(&gdt_desc);
21870- gdt = gdt_desc.address;
21871+ gdt = (unsigned long)gdt_desc.address;
21872
21873 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
21874
21875@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
21876 /* 0x2 bit is always set */
21877 .flags = X86_EFLAGS_SF | 0x2,
21878 .sp = STACK_START,
21879- .es = __USER_DS,
21880+ .es = __KERNEL_DS,
21881 .cs = __KERNEL_CS,
21882 .ss = __KERNEL_DS,
21883- .ds = __USER_DS,
21884+ .ds = __KERNEL_DS,
21885 .fs = __KERNEL_PERCPU,
21886
21887 .__cr3 = __pa_nodebug(swapper_pg_dir),
21888diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
21889index cf3df1d..b637d9a 100644
21890--- a/arch/x86/kernel/dumpstack.c
21891+++ b/arch/x86/kernel/dumpstack.c
21892@@ -2,6 +2,9 @@
21893 * Copyright (C) 1991, 1992 Linus Torvalds
21894 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
21895 */
21896+#ifdef CONFIG_GRKERNSEC_HIDESYM
21897+#define __INCLUDED_BY_HIDESYM 1
21898+#endif
21899 #include <linux/kallsyms.h>
21900 #include <linux/kprobes.h>
21901 #include <linux/uaccess.h>
21902@@ -33,23 +36,21 @@ static void printk_stack_address(unsigned long address, int reliable)
21903
21904 void printk_address(unsigned long address)
21905 {
21906- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
21907+ pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
21908 }
21909
21910 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
21911 static void
21912 print_ftrace_graph_addr(unsigned long addr, void *data,
21913 const struct stacktrace_ops *ops,
21914- struct thread_info *tinfo, int *graph)
21915+ struct task_struct *task, int *graph)
21916 {
21917- struct task_struct *task;
21918 unsigned long ret_addr;
21919 int index;
21920
21921 if (addr != (unsigned long)return_to_handler)
21922 return;
21923
21924- task = tinfo->task;
21925 index = task->curr_ret_stack;
21926
21927 if (!task->ret_stack || index < *graph)
21928@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21929 static inline void
21930 print_ftrace_graph_addr(unsigned long addr, void *data,
21931 const struct stacktrace_ops *ops,
21932- struct thread_info *tinfo, int *graph)
21933+ struct task_struct *task, int *graph)
21934 { }
21935 #endif
21936
21937@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
21938 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
21939 */
21940
21941-static inline int valid_stack_ptr(struct thread_info *tinfo,
21942- void *p, unsigned int size, void *end)
21943+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
21944 {
21945- void *t = tinfo;
21946 if (end) {
21947 if (p < end && p >= (end-THREAD_SIZE))
21948 return 1;
21949@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
21950 }
21951
21952 unsigned long
21953-print_context_stack(struct thread_info *tinfo,
21954+print_context_stack(struct task_struct *task, void *stack_start,
21955 unsigned long *stack, unsigned long bp,
21956 const struct stacktrace_ops *ops, void *data,
21957 unsigned long *end, int *graph)
21958 {
21959 struct stack_frame *frame = (struct stack_frame *)bp;
21960
21961- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
21962+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
21963 unsigned long addr;
21964
21965 addr = *stack;
21966@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
21967 } else {
21968 ops->address(data, addr, 0);
21969 }
21970- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21971+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21972 }
21973 stack++;
21974 }
21975@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
21976 EXPORT_SYMBOL_GPL(print_context_stack);
21977
21978 unsigned long
21979-print_context_stack_bp(struct thread_info *tinfo,
21980+print_context_stack_bp(struct task_struct *task, void *stack_start,
21981 unsigned long *stack, unsigned long bp,
21982 const struct stacktrace_ops *ops, void *data,
21983 unsigned long *end, int *graph)
21984@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21985 struct stack_frame *frame = (struct stack_frame *)bp;
21986 unsigned long *ret_addr = &frame->return_address;
21987
21988- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
21989+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
21990 unsigned long addr = *ret_addr;
21991
21992 if (!__kernel_text_address(addr))
21993@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
21994 ops->address(data, addr, 1);
21995 frame = frame->next_frame;
21996 ret_addr = &frame->return_address;
21997- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
21998+ print_ftrace_graph_addr(addr, data, ops, task, graph);
21999 }
22000
22001 return (unsigned long)frame;
22002@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
22003 static void print_trace_address(void *data, unsigned long addr, int reliable)
22004 {
22005 touch_nmi_watchdog();
22006- printk(data);
22007+ printk("%s", (char *)data);
22008 printk_stack_address(addr, reliable);
22009 }
22010
22011@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
22012 EXPORT_SYMBOL_GPL(oops_begin);
22013 NOKPROBE_SYMBOL(oops_begin);
22014
22015+extern void gr_handle_kernel_exploit(void);
22016+
22017 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22018 {
22019 if (regs && kexec_should_crash(current))
22020@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22021 panic("Fatal exception in interrupt");
22022 if (panic_on_oops)
22023 panic("Fatal exception");
22024- do_exit(signr);
22025+
22026+ gr_handle_kernel_exploit();
22027+
22028+ do_group_exit(signr);
22029 }
22030 NOKPROBE_SYMBOL(oops_end);
22031
22032@@ -278,7 +282,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
22033 print_modules();
22034 show_regs(regs);
22035 #ifdef CONFIG_X86_32
22036- if (user_mode_vm(regs)) {
22037+ if (user_mode(regs)) {
22038 sp = regs->sp;
22039 ss = regs->ss & 0xffff;
22040 } else {
22041@@ -307,7 +311,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22042 unsigned long flags = oops_begin();
22043 int sig = SIGSEGV;
22044
22045- if (!user_mode_vm(regs))
22046+ if (!user_mode(regs))
22047 report_bug(regs->ip, regs);
22048
22049 if (__die(str, regs, err))
22050diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22051index 5abd4cd..ca97162 100644
22052--- a/arch/x86/kernel/dumpstack_32.c
22053+++ b/arch/x86/kernel/dumpstack_32.c
22054@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22055 bp = stack_frame(task, regs);
22056
22057 for (;;) {
22058- struct thread_info *context;
22059+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22060 void *end_stack;
22061
22062 end_stack = is_hardirq_stack(stack, cpu);
22063 if (!end_stack)
22064 end_stack = is_softirq_stack(stack, cpu);
22065
22066- context = task_thread_info(task);
22067- bp = ops->walk_stack(context, stack, bp, ops, data,
22068+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22069 end_stack, &graph);
22070
22071 /* Stop if not on irq stack */
22072@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22073 int i;
22074
22075 show_regs_print_info(KERN_EMERG);
22076- __show_regs(regs, !user_mode_vm(regs));
22077+ __show_regs(regs, !user_mode(regs));
22078
22079 /*
22080 * When in-kernel, we also print out the stack and code at the
22081 * time of the fault..
22082 */
22083- if (!user_mode_vm(regs)) {
22084+ if (!user_mode(regs)) {
22085 unsigned int code_prologue = code_bytes * 43 / 64;
22086 unsigned int code_len = code_bytes;
22087 unsigned char c;
22088 u8 *ip;
22089+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22090
22091 pr_emerg("Stack:\n");
22092 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22093
22094 pr_emerg("Code:");
22095
22096- ip = (u8 *)regs->ip - code_prologue;
22097+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22098 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22099 /* try starting at IP */
22100- ip = (u8 *)regs->ip;
22101+ ip = (u8 *)regs->ip + cs_base;
22102 code_len = code_len - code_prologue + 1;
22103 }
22104 for (i = 0; i < code_len; i++, ip++) {
22105@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22106 pr_cont(" Bad EIP value.");
22107 break;
22108 }
22109- if (ip == (u8 *)regs->ip)
22110+ if (ip == (u8 *)regs->ip + cs_base)
22111 pr_cont(" <%02x>", c);
22112 else
22113 pr_cont(" %02x", c);
22114@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22115 {
22116 unsigned short ud2;
22117
22118+ ip = ktla_ktva(ip);
22119 if (ip < PAGE_OFFSET)
22120 return 0;
22121 if (probe_kernel_address((unsigned short *)ip, ud2))
22122@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22123
22124 return ud2 == 0x0b0f;
22125 }
22126+
22127+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22128+void __used pax_check_alloca(unsigned long size)
22129+{
22130+ unsigned long sp = (unsigned long)&sp, stack_left;
22131+
22132+ /* all kernel stacks are of the same size */
22133+ stack_left = sp & (THREAD_SIZE - 1);
22134+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22135+}
22136+EXPORT_SYMBOL(pax_check_alloca);
22137+#endif
22138diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22139index ff86f19..a2efee8 100644
22140--- a/arch/x86/kernel/dumpstack_64.c
22141+++ b/arch/x86/kernel/dumpstack_64.c
22142@@ -153,12 +153,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22143 const struct stacktrace_ops *ops, void *data)
22144 {
22145 const unsigned cpu = get_cpu();
22146- struct thread_info *tinfo;
22147 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22148 unsigned long dummy;
22149 unsigned used = 0;
22150 int graph = 0;
22151 int done = 0;
22152+ void *stack_start;
22153
22154 if (!task)
22155 task = current;
22156@@ -179,7 +179,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22157 * current stack address. If the stacks consist of nested
22158 * exceptions
22159 */
22160- tinfo = task_thread_info(task);
22161 while (!done) {
22162 unsigned long *stack_end;
22163 enum stack_type stype;
22164@@ -202,7 +201,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22165 if (ops->stack(data, id) < 0)
22166 break;
22167
22168- bp = ops->walk_stack(tinfo, stack, bp, ops,
22169+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22170 data, stack_end, &graph);
22171 ops->stack(data, "<EOE>");
22172 /*
22173@@ -210,6 +209,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22174 * second-to-last pointer (index -2 to end) in the
22175 * exception stack:
22176 */
22177+ if ((u16)stack_end[-1] != __KERNEL_DS)
22178+ goto out;
22179 stack = (unsigned long *) stack_end[-2];
22180 done = 0;
22181 break;
22182@@ -218,7 +219,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22183
22184 if (ops->stack(data, "IRQ") < 0)
22185 break;
22186- bp = ops->walk_stack(tinfo, stack, bp,
22187+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22188 ops, data, stack_end, &graph);
22189 /*
22190 * We link to the next stack (which would be
22191@@ -240,7 +241,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22192 /*
22193 * This handles the process stack:
22194 */
22195- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22196+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22197+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22198+out:
22199 put_cpu();
22200 }
22201 EXPORT_SYMBOL(dump_trace);
22202@@ -344,8 +347,55 @@ int is_valid_bugaddr(unsigned long ip)
22203 {
22204 unsigned short ud2;
22205
22206- if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
22207+ if (probe_kernel_address((unsigned short *)ip, ud2))
22208 return 0;
22209
22210 return ud2 == 0x0b0f;
22211 }
22212+
22213+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22214+void __used pax_check_alloca(unsigned long size)
22215+{
22216+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22217+ unsigned cpu, used;
22218+ char *id;
22219+
22220+ /* check the process stack first */
22221+ stack_start = (unsigned long)task_stack_page(current);
22222+ stack_end = stack_start + THREAD_SIZE;
22223+ if (likely(stack_start <= sp && sp < stack_end)) {
22224+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22225+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22226+ return;
22227+ }
22228+
22229+ cpu = get_cpu();
22230+
22231+ /* check the irq stacks */
22232+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22233+ stack_start = stack_end - IRQ_STACK_SIZE;
22234+ if (stack_start <= sp && sp < stack_end) {
22235+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22236+ put_cpu();
22237+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22238+ return;
22239+ }
22240+
22241+ /* check the exception stacks */
22242+ used = 0;
22243+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22244+ stack_start = stack_end - EXCEPTION_STKSZ;
22245+ if (stack_end && stack_start <= sp && sp < stack_end) {
22246+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22247+ put_cpu();
22248+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22249+ return;
22250+ }
22251+
22252+ put_cpu();
22253+
22254+ /* unknown stack */
22255+ BUG();
22256+}
22257+EXPORT_SYMBOL(pax_check_alloca);
22258+#endif
22259diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22260index 46201de..ebffabf 100644
22261--- a/arch/x86/kernel/e820.c
22262+++ b/arch/x86/kernel/e820.c
22263@@ -794,8 +794,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
22264
22265 static void early_panic(char *msg)
22266 {
22267- early_printk(msg);
22268- panic(msg);
22269+ early_printk("%s", msg);
22270+ panic("%s", msg);
22271 }
22272
22273 static int userdef __initdata;
22274diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
22275index a62536a..8444df4 100644
22276--- a/arch/x86/kernel/early_printk.c
22277+++ b/arch/x86/kernel/early_printk.c
22278@@ -7,6 +7,7 @@
22279 #include <linux/pci_regs.h>
22280 #include <linux/pci_ids.h>
22281 #include <linux/errno.h>
22282+#include <linux/sched.h>
22283 #include <asm/io.h>
22284 #include <asm/processor.h>
22285 #include <asm/fcntl.h>
22286diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
22287index 31e2d5b..b31c76d 100644
22288--- a/arch/x86/kernel/entry_32.S
22289+++ b/arch/x86/kernel/entry_32.S
22290@@ -177,13 +177,154 @@
22291 /*CFI_REL_OFFSET gs, PT_GS*/
22292 .endm
22293 .macro SET_KERNEL_GS reg
22294+
22295+#ifdef CONFIG_CC_STACKPROTECTOR
22296 movl $(__KERNEL_STACK_CANARY), \reg
22297+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22298+ movl $(__USER_DS), \reg
22299+#else
22300+ xorl \reg, \reg
22301+#endif
22302+
22303 movl \reg, %gs
22304 .endm
22305
22306 #endif /* CONFIG_X86_32_LAZY_GS */
22307
22308-.macro SAVE_ALL
22309+.macro pax_enter_kernel
22310+#ifdef CONFIG_PAX_KERNEXEC
22311+ call pax_enter_kernel
22312+#endif
22313+.endm
22314+
22315+.macro pax_exit_kernel
22316+#ifdef CONFIG_PAX_KERNEXEC
22317+ call pax_exit_kernel
22318+#endif
22319+.endm
22320+
22321+#ifdef CONFIG_PAX_KERNEXEC
22322+ENTRY(pax_enter_kernel)
22323+#ifdef CONFIG_PARAVIRT
22324+ pushl %eax
22325+ pushl %ecx
22326+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
22327+ mov %eax, %esi
22328+#else
22329+ mov %cr0, %esi
22330+#endif
22331+ bts $16, %esi
22332+ jnc 1f
22333+ mov %cs, %esi
22334+ cmp $__KERNEL_CS, %esi
22335+ jz 3f
22336+ ljmp $__KERNEL_CS, $3f
22337+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
22338+2:
22339+#ifdef CONFIG_PARAVIRT
22340+ mov %esi, %eax
22341+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
22342+#else
22343+ mov %esi, %cr0
22344+#endif
22345+3:
22346+#ifdef CONFIG_PARAVIRT
22347+ popl %ecx
22348+ popl %eax
22349+#endif
22350+ ret
22351+ENDPROC(pax_enter_kernel)
22352+
22353+ENTRY(pax_exit_kernel)
22354+#ifdef CONFIG_PARAVIRT
22355+ pushl %eax
22356+ pushl %ecx
22357+#endif
22358+ mov %cs, %esi
22359+ cmp $__KERNEXEC_KERNEL_CS, %esi
22360+ jnz 2f
22361+#ifdef CONFIG_PARAVIRT
22362+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
22363+ mov %eax, %esi
22364+#else
22365+ mov %cr0, %esi
22366+#endif
22367+ btr $16, %esi
22368+ ljmp $__KERNEL_CS, $1f
22369+1:
22370+#ifdef CONFIG_PARAVIRT
22371+ mov %esi, %eax
22372+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
22373+#else
22374+ mov %esi, %cr0
22375+#endif
22376+2:
22377+#ifdef CONFIG_PARAVIRT
22378+ popl %ecx
22379+ popl %eax
22380+#endif
22381+ ret
22382+ENDPROC(pax_exit_kernel)
22383+#endif
22384+
22385+ .macro pax_erase_kstack
22386+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22387+ call pax_erase_kstack
22388+#endif
22389+ .endm
22390+
22391+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22392+/*
22393+ * ebp: thread_info
22394+ */
22395+ENTRY(pax_erase_kstack)
22396+ pushl %edi
22397+ pushl %ecx
22398+ pushl %eax
22399+
22400+ mov TI_lowest_stack(%ebp), %edi
22401+ mov $-0xBEEF, %eax
22402+ std
22403+
22404+1: mov %edi, %ecx
22405+ and $THREAD_SIZE_asm - 1, %ecx
22406+ shr $2, %ecx
22407+ repne scasl
22408+ jecxz 2f
22409+
22410+ cmp $2*16, %ecx
22411+ jc 2f
22412+
22413+ mov $2*16, %ecx
22414+ repe scasl
22415+ jecxz 2f
22416+ jne 1b
22417+
22418+2: cld
22419+ or $2*4, %edi
22420+ mov %esp, %ecx
22421+ sub %edi, %ecx
22422+
22423+ cmp $THREAD_SIZE_asm, %ecx
22424+ jb 3f
22425+ ud2
22426+3:
22427+
22428+ shr $2, %ecx
22429+ rep stosl
22430+
22431+ mov TI_task_thread_sp0(%ebp), %edi
22432+ sub $128, %edi
22433+ mov %edi, TI_lowest_stack(%ebp)
22434+
22435+ popl %eax
22436+ popl %ecx
22437+ popl %edi
22438+ ret
22439+ENDPROC(pax_erase_kstack)
22440+#endif
22441+
22442+.macro __SAVE_ALL _DS
22443 cld
22444 PUSH_GS
22445 pushl_cfi %fs
22446@@ -206,7 +347,7 @@
22447 CFI_REL_OFFSET ecx, 0
22448 pushl_cfi %ebx
22449 CFI_REL_OFFSET ebx, 0
22450- movl $(__USER_DS), %edx
22451+ movl $\_DS, %edx
22452 movl %edx, %ds
22453 movl %edx, %es
22454 movl $(__KERNEL_PERCPU), %edx
22455@@ -214,6 +355,15 @@
22456 SET_KERNEL_GS %edx
22457 .endm
22458
22459+.macro SAVE_ALL
22460+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22461+ __SAVE_ALL __KERNEL_DS
22462+ pax_enter_kernel
22463+#else
22464+ __SAVE_ALL __USER_DS
22465+#endif
22466+.endm
22467+
22468 .macro RESTORE_INT_REGS
22469 popl_cfi %ebx
22470 CFI_RESTORE ebx
22471@@ -297,7 +447,7 @@ ENTRY(ret_from_fork)
22472 popfl_cfi
22473 jmp syscall_exit
22474 CFI_ENDPROC
22475-END(ret_from_fork)
22476+ENDPROC(ret_from_fork)
22477
22478 ENTRY(ret_from_kernel_thread)
22479 CFI_STARTPROC
22480@@ -340,7 +490,15 @@ ret_from_intr:
22481 andl $SEGMENT_RPL_MASK, %eax
22482 #endif
22483 cmpl $USER_RPL, %eax
22484+
22485+#ifdef CONFIG_PAX_KERNEXEC
22486+ jae resume_userspace
22487+
22488+ pax_exit_kernel
22489+ jmp resume_kernel
22490+#else
22491 jb resume_kernel # not returning to v8086 or userspace
22492+#endif
22493
22494 ENTRY(resume_userspace)
22495 LOCKDEP_SYS_EXIT
22496@@ -352,8 +510,8 @@ ENTRY(resume_userspace)
22497 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22498 # int/exception return?
22499 jne work_pending
22500- jmp restore_all
22501-END(ret_from_exception)
22502+ jmp restore_all_pax
22503+ENDPROC(ret_from_exception)
22504
22505 #ifdef CONFIG_PREEMPT
22506 ENTRY(resume_kernel)
22507@@ -365,7 +523,7 @@ need_resched:
22508 jz restore_all
22509 call preempt_schedule_irq
22510 jmp need_resched
22511-END(resume_kernel)
22512+ENDPROC(resume_kernel)
22513 #endif
22514 CFI_ENDPROC
22515
22516@@ -395,30 +553,45 @@ sysenter_past_esp:
22517 /*CFI_REL_OFFSET cs, 0*/
22518 /*
22519 * Push current_thread_info()->sysenter_return to the stack.
22520- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
22521- * pushed above; +8 corresponds to copy_thread's esp0 setting.
22522 */
22523- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
22524+ pushl_cfi $0
22525 CFI_REL_OFFSET eip, 0
22526
22527 pushl_cfi %eax
22528 SAVE_ALL
22529+ GET_THREAD_INFO(%ebp)
22530+ movl TI_sysenter_return(%ebp),%ebp
22531+ movl %ebp,PT_EIP(%esp)
22532 ENABLE_INTERRUPTS(CLBR_NONE)
22533
22534 /*
22535 * Load the potential sixth argument from user stack.
22536 * Careful about security.
22537 */
22538+ movl PT_OLDESP(%esp),%ebp
22539+
22540+#ifdef CONFIG_PAX_MEMORY_UDEREF
22541+ mov PT_OLDSS(%esp),%ds
22542+1: movl %ds:(%ebp),%ebp
22543+ push %ss
22544+ pop %ds
22545+#else
22546 cmpl $__PAGE_OFFSET-3,%ebp
22547 jae syscall_fault
22548 ASM_STAC
22549 1: movl (%ebp),%ebp
22550 ASM_CLAC
22551+#endif
22552+
22553 movl %ebp,PT_EBP(%esp)
22554 _ASM_EXTABLE(1b,syscall_fault)
22555
22556 GET_THREAD_INFO(%ebp)
22557
22558+#ifdef CONFIG_PAX_RANDKSTACK
22559+ pax_erase_kstack
22560+#endif
22561+
22562 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22563 jnz sysenter_audit
22564 sysenter_do_call:
22565@@ -434,12 +607,24 @@ sysenter_after_call:
22566 testl $_TIF_ALLWORK_MASK, %ecx
22567 jne sysexit_audit
22568 sysenter_exit:
22569+
22570+#ifdef CONFIG_PAX_RANDKSTACK
22571+ pushl_cfi %eax
22572+ movl %esp, %eax
22573+ call pax_randomize_kstack
22574+ popl_cfi %eax
22575+#endif
22576+
22577+ pax_erase_kstack
22578+
22579 /* if something modifies registers it must also disable sysexit */
22580 movl PT_EIP(%esp), %edx
22581 movl PT_OLDESP(%esp), %ecx
22582 xorl %ebp,%ebp
22583 TRACE_IRQS_ON
22584 1: mov PT_FS(%esp), %fs
22585+2: mov PT_DS(%esp), %ds
22586+3: mov PT_ES(%esp), %es
22587 PTGS_TO_GS
22588 ENABLE_INTERRUPTS_SYSEXIT
22589
22590@@ -453,6 +638,9 @@ sysenter_audit:
22591 pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
22592 pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
22593 call __audit_syscall_entry
22594+
22595+ pax_erase_kstack
22596+
22597 popl_cfi %ecx /* get that remapped edx off the stack */
22598 popl_cfi %ecx /* get that remapped esi off the stack */
22599 movl PT_EAX(%esp),%eax /* reload syscall number */
22600@@ -479,10 +667,16 @@ sysexit_audit:
22601
22602 CFI_ENDPROC
22603 .pushsection .fixup,"ax"
22604-2: movl $0,PT_FS(%esp)
22605+4: movl $0,PT_FS(%esp)
22606+ jmp 1b
22607+5: movl $0,PT_DS(%esp)
22608+ jmp 1b
22609+6: movl $0,PT_ES(%esp)
22610 jmp 1b
22611 .popsection
22612- _ASM_EXTABLE(1b,2b)
22613+ _ASM_EXTABLE(1b,4b)
22614+ _ASM_EXTABLE(2b,5b)
22615+ _ASM_EXTABLE(3b,6b)
22616 PTGS_TO_GS_EX
22617 ENDPROC(ia32_sysenter_target)
22618
22619@@ -493,6 +687,11 @@ ENTRY(system_call)
22620 pushl_cfi %eax # save orig_eax
22621 SAVE_ALL
22622 GET_THREAD_INFO(%ebp)
22623+
22624+#ifdef CONFIG_PAX_RANDKSTACK
22625+ pax_erase_kstack
22626+#endif
22627+
22628 # system call tracing in operation / emulation
22629 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22630 jnz syscall_trace_entry
22631@@ -512,6 +711,15 @@ syscall_exit:
22632 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22633 jne syscall_exit_work
22634
22635+restore_all_pax:
22636+
22637+#ifdef CONFIG_PAX_RANDKSTACK
22638+ movl %esp, %eax
22639+ call pax_randomize_kstack
22640+#endif
22641+
22642+ pax_erase_kstack
22643+
22644 restore_all:
22645 TRACE_IRQS_IRET
22646 restore_all_notrace:
22647@@ -566,14 +774,34 @@ ldt_ss:
22648 * compensating for the offset by changing to the ESPFIX segment with
22649 * a base address that matches for the difference.
22650 */
22651-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22652+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22653 mov %esp, %edx /* load kernel esp */
22654 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22655 mov %dx, %ax /* eax: new kernel esp */
22656 sub %eax, %edx /* offset (low word is 0) */
22657+#ifdef CONFIG_SMP
22658+ movl PER_CPU_VAR(cpu_number), %ebx
22659+ shll $PAGE_SHIFT_asm, %ebx
22660+ addl $cpu_gdt_table, %ebx
22661+#else
22662+ movl $cpu_gdt_table, %ebx
22663+#endif
22664 shr $16, %edx
22665- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22666- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22667+
22668+#ifdef CONFIG_PAX_KERNEXEC
22669+ mov %cr0, %esi
22670+ btr $16, %esi
22671+ mov %esi, %cr0
22672+#endif
22673+
22674+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22675+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22676+
22677+#ifdef CONFIG_PAX_KERNEXEC
22678+ bts $16, %esi
22679+ mov %esi, %cr0
22680+#endif
22681+
22682 pushl_cfi $__ESPFIX_SS
22683 pushl_cfi %eax /* new kernel esp */
22684 /* Disable interrupts, but do not irqtrace this section: we
22685@@ -603,20 +831,18 @@ work_resched:
22686 movl TI_flags(%ebp), %ecx
22687 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22688 # than syscall tracing?
22689- jz restore_all
22690+ jz restore_all_pax
22691 testb $_TIF_NEED_RESCHED, %cl
22692 jnz work_resched
22693
22694 work_notifysig: # deal with pending signals and
22695 # notify-resume requests
22696+ movl %esp, %eax
22697 #ifdef CONFIG_VM86
22698 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22699- movl %esp, %eax
22700 jne work_notifysig_v86 # returning to kernel-space or
22701 # vm86-space
22702 1:
22703-#else
22704- movl %esp, %eax
22705 #endif
22706 TRACE_IRQS_ON
22707 ENABLE_INTERRUPTS(CLBR_NONE)
22708@@ -637,7 +863,7 @@ work_notifysig_v86:
22709 movl %eax, %esp
22710 jmp 1b
22711 #endif
22712-END(work_pending)
22713+ENDPROC(work_pending)
22714
22715 # perform syscall exit tracing
22716 ALIGN
22717@@ -645,11 +871,14 @@ syscall_trace_entry:
22718 movl $-ENOSYS,PT_EAX(%esp)
22719 movl %esp, %eax
22720 call syscall_trace_enter
22721+
22722+ pax_erase_kstack
22723+
22724 /* What it returned is what we'll actually use. */
22725 cmpl $(NR_syscalls), %eax
22726 jnae syscall_call
22727 jmp syscall_exit
22728-END(syscall_trace_entry)
22729+ENDPROC(syscall_trace_entry)
22730
22731 # perform syscall exit tracing
22732 ALIGN
22733@@ -662,26 +891,30 @@ syscall_exit_work:
22734 movl %esp, %eax
22735 call syscall_trace_leave
22736 jmp resume_userspace
22737-END(syscall_exit_work)
22738+ENDPROC(syscall_exit_work)
22739 CFI_ENDPROC
22740
22741 RING0_INT_FRAME # can't unwind into user space anyway
22742 syscall_fault:
22743+#ifdef CONFIG_PAX_MEMORY_UDEREF
22744+ push %ss
22745+ pop %ds
22746+#endif
22747 ASM_CLAC
22748 GET_THREAD_INFO(%ebp)
22749 movl $-EFAULT,PT_EAX(%esp)
22750 jmp resume_userspace
22751-END(syscall_fault)
22752+ENDPROC(syscall_fault)
22753
22754 syscall_badsys:
22755 movl $-ENOSYS,%eax
22756 jmp syscall_after_call
22757-END(syscall_badsys)
22758+ENDPROC(syscall_badsys)
22759
22760 sysenter_badsys:
22761 movl $-ENOSYS,%eax
22762 jmp sysenter_after_call
22763-END(sysenter_badsys)
22764+ENDPROC(sysenter_badsys)
22765 CFI_ENDPROC
22766
22767 .macro FIXUP_ESPFIX_STACK
22768@@ -694,8 +927,15 @@ END(sysenter_badsys)
22769 */
22770 #ifdef CONFIG_X86_ESPFIX32
22771 /* fixup the stack */
22772- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
22773- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
22774+#ifdef CONFIG_SMP
22775+ movl PER_CPU_VAR(cpu_number), %ebx
22776+ shll $PAGE_SHIFT_asm, %ebx
22777+ addl $cpu_gdt_table, %ebx
22778+#else
22779+ movl $cpu_gdt_table, %ebx
22780+#endif
22781+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
22782+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
22783 shl $16, %eax
22784 addl %esp, %eax /* the adjusted stack pointer */
22785 pushl_cfi $__KERNEL_DS
22786@@ -751,7 +991,7 @@ vector=vector+1
22787 .endr
22788 2: jmp common_interrupt
22789 .endr
22790-END(irq_entries_start)
22791+ENDPROC(irq_entries_start)
22792
22793 .previous
22794 END(interrupt)
22795@@ -808,7 +1048,7 @@ ENTRY(coprocessor_error)
22796 pushl_cfi $do_coprocessor_error
22797 jmp error_code
22798 CFI_ENDPROC
22799-END(coprocessor_error)
22800+ENDPROC(coprocessor_error)
22801
22802 ENTRY(simd_coprocessor_error)
22803 RING0_INT_FRAME
22804@@ -821,7 +1061,7 @@ ENTRY(simd_coprocessor_error)
22805 .section .altinstructions,"a"
22806 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
22807 .previous
22808-.section .altinstr_replacement,"ax"
22809+.section .altinstr_replacement,"a"
22810 663: pushl $do_simd_coprocessor_error
22811 664:
22812 .previous
22813@@ -830,7 +1070,7 @@ ENTRY(simd_coprocessor_error)
22814 #endif
22815 jmp error_code
22816 CFI_ENDPROC
22817-END(simd_coprocessor_error)
22818+ENDPROC(simd_coprocessor_error)
22819
22820 ENTRY(device_not_available)
22821 RING0_INT_FRAME
22822@@ -839,18 +1079,18 @@ ENTRY(device_not_available)
22823 pushl_cfi $do_device_not_available
22824 jmp error_code
22825 CFI_ENDPROC
22826-END(device_not_available)
22827+ENDPROC(device_not_available)
22828
22829 #ifdef CONFIG_PARAVIRT
22830 ENTRY(native_iret)
22831 iret
22832 _ASM_EXTABLE(native_iret, iret_exc)
22833-END(native_iret)
22834+ENDPROC(native_iret)
22835
22836 ENTRY(native_irq_enable_sysexit)
22837 sti
22838 sysexit
22839-END(native_irq_enable_sysexit)
22840+ENDPROC(native_irq_enable_sysexit)
22841 #endif
22842
22843 ENTRY(overflow)
22844@@ -860,7 +1100,7 @@ ENTRY(overflow)
22845 pushl_cfi $do_overflow
22846 jmp error_code
22847 CFI_ENDPROC
22848-END(overflow)
22849+ENDPROC(overflow)
22850
22851 ENTRY(bounds)
22852 RING0_INT_FRAME
22853@@ -869,7 +1109,7 @@ ENTRY(bounds)
22854 pushl_cfi $do_bounds
22855 jmp error_code
22856 CFI_ENDPROC
22857-END(bounds)
22858+ENDPROC(bounds)
22859
22860 ENTRY(invalid_op)
22861 RING0_INT_FRAME
22862@@ -878,7 +1118,7 @@ ENTRY(invalid_op)
22863 pushl_cfi $do_invalid_op
22864 jmp error_code
22865 CFI_ENDPROC
22866-END(invalid_op)
22867+ENDPROC(invalid_op)
22868
22869 ENTRY(coprocessor_segment_overrun)
22870 RING0_INT_FRAME
22871@@ -887,7 +1127,7 @@ ENTRY(coprocessor_segment_overrun)
22872 pushl_cfi $do_coprocessor_segment_overrun
22873 jmp error_code
22874 CFI_ENDPROC
22875-END(coprocessor_segment_overrun)
22876+ENDPROC(coprocessor_segment_overrun)
22877
22878 ENTRY(invalid_TSS)
22879 RING0_EC_FRAME
22880@@ -895,7 +1135,7 @@ ENTRY(invalid_TSS)
22881 pushl_cfi $do_invalid_TSS
22882 jmp error_code
22883 CFI_ENDPROC
22884-END(invalid_TSS)
22885+ENDPROC(invalid_TSS)
22886
22887 ENTRY(segment_not_present)
22888 RING0_EC_FRAME
22889@@ -903,7 +1143,7 @@ ENTRY(segment_not_present)
22890 pushl_cfi $do_segment_not_present
22891 jmp error_code
22892 CFI_ENDPROC
22893-END(segment_not_present)
22894+ENDPROC(segment_not_present)
22895
22896 ENTRY(stack_segment)
22897 RING0_EC_FRAME
22898@@ -911,7 +1151,7 @@ ENTRY(stack_segment)
22899 pushl_cfi $do_stack_segment
22900 jmp error_code
22901 CFI_ENDPROC
22902-END(stack_segment)
22903+ENDPROC(stack_segment)
22904
22905 ENTRY(alignment_check)
22906 RING0_EC_FRAME
22907@@ -919,7 +1159,7 @@ ENTRY(alignment_check)
22908 pushl_cfi $do_alignment_check
22909 jmp error_code
22910 CFI_ENDPROC
22911-END(alignment_check)
22912+ENDPROC(alignment_check)
22913
22914 ENTRY(divide_error)
22915 RING0_INT_FRAME
22916@@ -928,7 +1168,7 @@ ENTRY(divide_error)
22917 pushl_cfi $do_divide_error
22918 jmp error_code
22919 CFI_ENDPROC
22920-END(divide_error)
22921+ENDPROC(divide_error)
22922
22923 #ifdef CONFIG_X86_MCE
22924 ENTRY(machine_check)
22925@@ -938,7 +1178,7 @@ ENTRY(machine_check)
22926 pushl_cfi machine_check_vector
22927 jmp error_code
22928 CFI_ENDPROC
22929-END(machine_check)
22930+ENDPROC(machine_check)
22931 #endif
22932
22933 ENTRY(spurious_interrupt_bug)
22934@@ -948,7 +1188,7 @@ ENTRY(spurious_interrupt_bug)
22935 pushl_cfi $do_spurious_interrupt_bug
22936 jmp error_code
22937 CFI_ENDPROC
22938-END(spurious_interrupt_bug)
22939+ENDPROC(spurious_interrupt_bug)
22940
22941 #ifdef CONFIG_XEN
22942 /* Xen doesn't set %esp to be precisely what the normal sysenter
22943@@ -1057,7 +1297,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
22944
22945 ENTRY(mcount)
22946 ret
22947-END(mcount)
22948+ENDPROC(mcount)
22949
22950 ENTRY(ftrace_caller)
22951 pushl %eax
22952@@ -1087,7 +1327,7 @@ ftrace_graph_call:
22953 .globl ftrace_stub
22954 ftrace_stub:
22955 ret
22956-END(ftrace_caller)
22957+ENDPROC(ftrace_caller)
22958
22959 ENTRY(ftrace_regs_caller)
22960 pushf /* push flags before compare (in cs location) */
22961@@ -1185,7 +1425,7 @@ trace:
22962 popl %ecx
22963 popl %eax
22964 jmp ftrace_stub
22965-END(mcount)
22966+ENDPROC(mcount)
22967 #endif /* CONFIG_DYNAMIC_FTRACE */
22968 #endif /* CONFIG_FUNCTION_TRACER */
22969
22970@@ -1203,7 +1443,7 @@ ENTRY(ftrace_graph_caller)
22971 popl %ecx
22972 popl %eax
22973 ret
22974-END(ftrace_graph_caller)
22975+ENDPROC(ftrace_graph_caller)
22976
22977 .globl return_to_handler
22978 return_to_handler:
22979@@ -1264,15 +1504,18 @@ error_code:
22980 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
22981 REG_TO_PTGS %ecx
22982 SET_KERNEL_GS %ecx
22983- movl $(__USER_DS), %ecx
22984+ movl $(__KERNEL_DS), %ecx
22985 movl %ecx, %ds
22986 movl %ecx, %es
22987+
22988+ pax_enter_kernel
22989+
22990 TRACE_IRQS_OFF
22991 movl %esp,%eax # pt_regs pointer
22992 call *%edi
22993 jmp ret_from_exception
22994 CFI_ENDPROC
22995-END(page_fault)
22996+ENDPROC(page_fault)
22997
22998 /*
22999 * Debug traps and NMI can happen at the one SYSENTER instruction
23000@@ -1315,7 +1558,7 @@ debug_stack_correct:
23001 call do_debug
23002 jmp ret_from_exception
23003 CFI_ENDPROC
23004-END(debug)
23005+ENDPROC(debug)
23006
23007 /*
23008 * NMI is doubly nasty. It can happen _while_ we're handling
23009@@ -1355,6 +1598,9 @@ nmi_stack_correct:
23010 xorl %edx,%edx # zero error code
23011 movl %esp,%eax # pt_regs pointer
23012 call do_nmi
23013+
23014+ pax_exit_kernel
23015+
23016 jmp restore_all_notrace
23017 CFI_ENDPROC
23018
23019@@ -1392,13 +1638,16 @@ nmi_espfix_stack:
23020 FIXUP_ESPFIX_STACK # %eax == %esp
23021 xorl %edx,%edx # zero error code
23022 call do_nmi
23023+
23024+ pax_exit_kernel
23025+
23026 RESTORE_REGS
23027 lss 12+4(%esp), %esp # back to espfix stack
23028 CFI_ADJUST_CFA_OFFSET -24
23029 jmp irq_return
23030 #endif
23031 CFI_ENDPROC
23032-END(nmi)
23033+ENDPROC(nmi)
23034
23035 ENTRY(int3)
23036 RING0_INT_FRAME
23037@@ -1411,14 +1660,14 @@ ENTRY(int3)
23038 call do_int3
23039 jmp ret_from_exception
23040 CFI_ENDPROC
23041-END(int3)
23042+ENDPROC(int3)
23043
23044 ENTRY(general_protection)
23045 RING0_EC_FRAME
23046 pushl_cfi $do_general_protection
23047 jmp error_code
23048 CFI_ENDPROC
23049-END(general_protection)
23050+ENDPROC(general_protection)
23051
23052 #ifdef CONFIG_KVM_GUEST
23053 ENTRY(async_page_fault)
23054@@ -1427,6 +1676,6 @@ ENTRY(async_page_fault)
23055 pushl_cfi $do_async_page_fault
23056 jmp error_code
23057 CFI_ENDPROC
23058-END(async_page_fault)
23059+ENDPROC(async_page_fault)
23060 #endif
23061
23062diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23063index f0095a7..7ece039 100644
23064--- a/arch/x86/kernel/entry_64.S
23065+++ b/arch/x86/kernel/entry_64.S
23066@@ -59,6 +59,8 @@
23067 #include <asm/smap.h>
23068 #include <asm/pgtable_types.h>
23069 #include <linux/err.h>
23070+#include <asm/pgtable.h>
23071+#include <asm/alternative-asm.h>
23072
23073 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23074 #include <linux/elf-em.h>
23075@@ -81,6 +83,431 @@ ENTRY(native_usergs_sysret64)
23076 ENDPROC(native_usergs_sysret64)
23077 #endif /* CONFIG_PARAVIRT */
23078
23079+ .macro ljmpq sel, off
23080+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23081+ .byte 0x48; ljmp *1234f(%rip)
23082+ .pushsection .rodata
23083+ .align 16
23084+ 1234: .quad \off; .word \sel
23085+ .popsection
23086+#else
23087+ pushq $\sel
23088+ pushq $\off
23089+ lretq
23090+#endif
23091+ .endm
23092+
23093+ .macro pax_enter_kernel
23094+ pax_set_fptr_mask
23095+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23096+ call pax_enter_kernel
23097+#endif
23098+ .endm
23099+
23100+ .macro pax_exit_kernel
23101+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23102+ call pax_exit_kernel
23103+#endif
23104+
23105+ .endm
23106+
23107+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23108+ENTRY(pax_enter_kernel)
23109+ pushq %rdi
23110+
23111+#ifdef CONFIG_PARAVIRT
23112+ PV_SAVE_REGS(CLBR_RDI)
23113+#endif
23114+
23115+#ifdef CONFIG_PAX_KERNEXEC
23116+ GET_CR0_INTO_RDI
23117+ bts $X86_CR0_WP_BIT,%rdi
23118+ jnc 3f
23119+ mov %cs,%edi
23120+ cmp $__KERNEL_CS,%edi
23121+ jnz 2f
23122+1:
23123+#endif
23124+
23125+#ifdef CONFIG_PAX_MEMORY_UDEREF
23126+ 661: jmp 111f
23127+ .pushsection .altinstr_replacement, "a"
23128+ 662: ASM_NOP2
23129+ .popsection
23130+ .pushsection .altinstructions, "a"
23131+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23132+ .popsection
23133+ GET_CR3_INTO_RDI
23134+ cmp $0,%dil
23135+ jnz 112f
23136+ mov $__KERNEL_DS,%edi
23137+ mov %edi,%ss
23138+ jmp 111f
23139+112: cmp $1,%dil
23140+ jz 113f
23141+ ud2
23142+113: sub $4097,%rdi
23143+ bts $63,%rdi
23144+ SET_RDI_INTO_CR3
23145+ mov $__UDEREF_KERNEL_DS,%edi
23146+ mov %edi,%ss
23147+111:
23148+#endif
23149+
23150+#ifdef CONFIG_PARAVIRT
23151+ PV_RESTORE_REGS(CLBR_RDI)
23152+#endif
23153+
23154+ popq %rdi
23155+ pax_force_retaddr
23156+ retq
23157+
23158+#ifdef CONFIG_PAX_KERNEXEC
23159+2: ljmpq __KERNEL_CS,1b
23160+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23161+4: SET_RDI_INTO_CR0
23162+ jmp 1b
23163+#endif
23164+ENDPROC(pax_enter_kernel)
23165+
23166+ENTRY(pax_exit_kernel)
23167+ pushq %rdi
23168+
23169+#ifdef CONFIG_PARAVIRT
23170+ PV_SAVE_REGS(CLBR_RDI)
23171+#endif
23172+
23173+#ifdef CONFIG_PAX_KERNEXEC
23174+ mov %cs,%rdi
23175+ cmp $__KERNEXEC_KERNEL_CS,%edi
23176+ jz 2f
23177+ GET_CR0_INTO_RDI
23178+ bts $X86_CR0_WP_BIT,%rdi
23179+ jnc 4f
23180+1:
23181+#endif
23182+
23183+#ifdef CONFIG_PAX_MEMORY_UDEREF
23184+ 661: jmp 111f
23185+ .pushsection .altinstr_replacement, "a"
23186+ 662: ASM_NOP2
23187+ .popsection
23188+ .pushsection .altinstructions, "a"
23189+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23190+ .popsection
23191+ mov %ss,%edi
23192+ cmp $__UDEREF_KERNEL_DS,%edi
23193+ jnz 111f
23194+ GET_CR3_INTO_RDI
23195+ cmp $0,%dil
23196+ jz 112f
23197+ ud2
23198+112: add $4097,%rdi
23199+ bts $63,%rdi
23200+ SET_RDI_INTO_CR3
23201+ mov $__KERNEL_DS,%edi
23202+ mov %edi,%ss
23203+111:
23204+#endif
23205+
23206+#ifdef CONFIG_PARAVIRT
23207+ PV_RESTORE_REGS(CLBR_RDI);
23208+#endif
23209+
23210+ popq %rdi
23211+ pax_force_retaddr
23212+ retq
23213+
23214+#ifdef CONFIG_PAX_KERNEXEC
23215+2: GET_CR0_INTO_RDI
23216+ btr $X86_CR0_WP_BIT,%rdi
23217+ jnc 4f
23218+ ljmpq __KERNEL_CS,3f
23219+3: SET_RDI_INTO_CR0
23220+ jmp 1b
23221+4: ud2
23222+ jmp 4b
23223+#endif
23224+ENDPROC(pax_exit_kernel)
23225+#endif
23226+
23227+ .macro pax_enter_kernel_user
23228+ pax_set_fptr_mask
23229+#ifdef CONFIG_PAX_MEMORY_UDEREF
23230+ call pax_enter_kernel_user
23231+#endif
23232+ .endm
23233+
23234+ .macro pax_exit_kernel_user
23235+#ifdef CONFIG_PAX_MEMORY_UDEREF
23236+ call pax_exit_kernel_user
23237+#endif
23238+#ifdef CONFIG_PAX_RANDKSTACK
23239+ pushq %rax
23240+ pushq %r11
23241+ call pax_randomize_kstack
23242+ popq %r11
23243+ popq %rax
23244+#endif
23245+ .endm
23246+
23247+#ifdef CONFIG_PAX_MEMORY_UDEREF
23248+ENTRY(pax_enter_kernel_user)
23249+ pushq %rdi
23250+ pushq %rbx
23251+
23252+#ifdef CONFIG_PARAVIRT
23253+ PV_SAVE_REGS(CLBR_RDI)
23254+#endif
23255+
23256+ 661: jmp 111f
23257+ .pushsection .altinstr_replacement, "a"
23258+ 662: ASM_NOP2
23259+ .popsection
23260+ .pushsection .altinstructions, "a"
23261+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23262+ .popsection
23263+ GET_CR3_INTO_RDI
23264+ cmp $1,%dil
23265+ jnz 4f
23266+ sub $4097,%rdi
23267+ bts $63,%rdi
23268+ SET_RDI_INTO_CR3
23269+ jmp 3f
23270+111:
23271+
23272+ GET_CR3_INTO_RDI
23273+ mov %rdi,%rbx
23274+ add $__START_KERNEL_map,%rbx
23275+ sub phys_base(%rip),%rbx
23276+
23277+#ifdef CONFIG_PARAVIRT
23278+ cmpl $0, pv_info+PARAVIRT_enabled
23279+ jz 1f
23280+ pushq %rdi
23281+ i = 0
23282+ .rept USER_PGD_PTRS
23283+ mov i*8(%rbx),%rsi
23284+ mov $0,%sil
23285+ lea i*8(%rbx),%rdi
23286+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23287+ i = i + 1
23288+ .endr
23289+ popq %rdi
23290+ jmp 2f
23291+1:
23292+#endif
23293+
23294+ i = 0
23295+ .rept USER_PGD_PTRS
23296+ movb $0,i*8(%rbx)
23297+ i = i + 1
23298+ .endr
23299+
23300+2: SET_RDI_INTO_CR3
23301+
23302+#ifdef CONFIG_PAX_KERNEXEC
23303+ GET_CR0_INTO_RDI
23304+ bts $X86_CR0_WP_BIT,%rdi
23305+ SET_RDI_INTO_CR0
23306+#endif
23307+
23308+3:
23309+
23310+#ifdef CONFIG_PARAVIRT
23311+ PV_RESTORE_REGS(CLBR_RDI)
23312+#endif
23313+
23314+ popq %rbx
23315+ popq %rdi
23316+ pax_force_retaddr
23317+ retq
23318+4: ud2
23319+ENDPROC(pax_enter_kernel_user)
23320+
23321+ENTRY(pax_exit_kernel_user)
23322+ pushq %rdi
23323+ pushq %rbx
23324+
23325+#ifdef CONFIG_PARAVIRT
23326+ PV_SAVE_REGS(CLBR_RDI)
23327+#endif
23328+
23329+ GET_CR3_INTO_RDI
23330+ 661: jmp 1f
23331+ .pushsection .altinstr_replacement, "a"
23332+ 662: ASM_NOP2
23333+ .popsection
23334+ .pushsection .altinstructions, "a"
23335+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23336+ .popsection
23337+ cmp $0,%dil
23338+ jnz 3f
23339+ add $4097,%rdi
23340+ bts $63,%rdi
23341+ SET_RDI_INTO_CR3
23342+ jmp 2f
23343+1:
23344+
23345+ mov %rdi,%rbx
23346+
23347+#ifdef CONFIG_PAX_KERNEXEC
23348+ GET_CR0_INTO_RDI
23349+ btr $X86_CR0_WP_BIT,%rdi
23350+ jnc 3f
23351+ SET_RDI_INTO_CR0
23352+#endif
23353+
23354+ add $__START_KERNEL_map,%rbx
23355+ sub phys_base(%rip),%rbx
23356+
23357+#ifdef CONFIG_PARAVIRT
23358+ cmpl $0, pv_info+PARAVIRT_enabled
23359+ jz 1f
23360+ i = 0
23361+ .rept USER_PGD_PTRS
23362+ mov i*8(%rbx),%rsi
23363+ mov $0x67,%sil
23364+ lea i*8(%rbx),%rdi
23365+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23366+ i = i + 1
23367+ .endr
23368+ jmp 2f
23369+1:
23370+#endif
23371+
23372+ i = 0
23373+ .rept USER_PGD_PTRS
23374+ movb $0x67,i*8(%rbx)
23375+ i = i + 1
23376+ .endr
23377+2:
23378+
23379+#ifdef CONFIG_PARAVIRT
23380+ PV_RESTORE_REGS(CLBR_RDI)
23381+#endif
23382+
23383+ popq %rbx
23384+ popq %rdi
23385+ pax_force_retaddr
23386+ retq
23387+3: ud2
23388+ENDPROC(pax_exit_kernel_user)
23389+#endif
23390+
23391+ .macro pax_enter_kernel_nmi
23392+ pax_set_fptr_mask
23393+
23394+#ifdef CONFIG_PAX_KERNEXEC
23395+ GET_CR0_INTO_RDI
23396+ bts $X86_CR0_WP_BIT,%rdi
23397+ jc 110f
23398+ SET_RDI_INTO_CR0
23399+ or $2,%ebx
23400+110:
23401+#endif
23402+
23403+#ifdef CONFIG_PAX_MEMORY_UDEREF
23404+ 661: jmp 111f
23405+ .pushsection .altinstr_replacement, "a"
23406+ 662: ASM_NOP2
23407+ .popsection
23408+ .pushsection .altinstructions, "a"
23409+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23410+ .popsection
23411+ GET_CR3_INTO_RDI
23412+ cmp $0,%dil
23413+ jz 111f
23414+ sub $4097,%rdi
23415+ or $4,%ebx
23416+ bts $63,%rdi
23417+ SET_RDI_INTO_CR3
23418+ mov $__UDEREF_KERNEL_DS,%edi
23419+ mov %edi,%ss
23420+111:
23421+#endif
23422+ .endm
23423+
23424+ .macro pax_exit_kernel_nmi
23425+#ifdef CONFIG_PAX_KERNEXEC
23426+ btr $1,%ebx
23427+ jnc 110f
23428+ GET_CR0_INTO_RDI
23429+ btr $X86_CR0_WP_BIT,%rdi
23430+ SET_RDI_INTO_CR0
23431+110:
23432+#endif
23433+
23434+#ifdef CONFIG_PAX_MEMORY_UDEREF
23435+ btr $2,%ebx
23436+ jnc 111f
23437+ GET_CR3_INTO_RDI
23438+ add $4097,%rdi
23439+ bts $63,%rdi
23440+ SET_RDI_INTO_CR3
23441+ mov $__KERNEL_DS,%edi
23442+ mov %edi,%ss
23443+111:
23444+#endif
23445+ .endm
23446+
23447+ .macro pax_erase_kstack
23448+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23449+ call pax_erase_kstack
23450+#endif
23451+ .endm
23452+
23453+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23454+ENTRY(pax_erase_kstack)
23455+ pushq %rdi
23456+ pushq %rcx
23457+ pushq %rax
23458+ pushq %r11
23459+
23460+ GET_THREAD_INFO(%r11)
23461+ mov TI_lowest_stack(%r11), %rdi
23462+ mov $-0xBEEF, %rax
23463+ std
23464+
23465+1: mov %edi, %ecx
23466+ and $THREAD_SIZE_asm - 1, %ecx
23467+ shr $3, %ecx
23468+ repne scasq
23469+ jecxz 2f
23470+
23471+ cmp $2*8, %ecx
23472+ jc 2f
23473+
23474+ mov $2*8, %ecx
23475+ repe scasq
23476+ jecxz 2f
23477+ jne 1b
23478+
23479+2: cld
23480+ or $2*8, %rdi
23481+ mov %esp, %ecx
23482+ sub %edi, %ecx
23483+
23484+ cmp $THREAD_SIZE_asm, %rcx
23485+ jb 3f
23486+ ud2
23487+3:
23488+
23489+ shr $3, %ecx
23490+ rep stosq
23491+
23492+ mov TI_task_thread_sp0(%r11), %rdi
23493+ sub $256, %rdi
23494+ mov %rdi, TI_lowest_stack(%r11)
23495+
23496+ popq %r11
23497+ popq %rax
23498+ popq %rcx
23499+ popq %rdi
23500+ pax_force_retaddr
23501+ ret
23502+ENDPROC(pax_erase_kstack)
23503+#endif
23504
23505 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
23506 #ifdef CONFIG_TRACE_IRQFLAGS
23507@@ -117,7 +544,7 @@ ENDPROC(native_usergs_sysret64)
23508 .endm
23509
23510 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
23511- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
23512+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
23513 jnc 1f
23514 TRACE_IRQS_ON_DEBUG
23515 1:
23516@@ -243,9 +670,52 @@ ENTRY(save_paranoid)
23517 js 1f /* negative -> in kernel */
23518 SWAPGS
23519 xorl %ebx,%ebx
23520-1: ret
23521+1:
23522+#ifdef CONFIG_PAX_MEMORY_UDEREF
23523+ testb $3, CS+8(%rsp)
23524+ jnz 1f
23525+ pax_enter_kernel
23526+ jmp 2f
23527+1: pax_enter_kernel_user
23528+2:
23529+#else
23530+ pax_enter_kernel
23531+#endif
23532+ pax_force_retaddr
23533+ ret
23534 CFI_ENDPROC
23535-END(save_paranoid)
23536+ENDPROC(save_paranoid)
23537+
23538+ENTRY(save_paranoid_nmi)
23539+ XCPT_FRAME 1 RDI+8
23540+ cld
23541+ movq_cfi rdi, RDI+8
23542+ movq_cfi rsi, RSI+8
23543+ movq_cfi rdx, RDX+8
23544+ movq_cfi rcx, RCX+8
23545+ movq_cfi rax, RAX+8
23546+ movq_cfi r8, R8+8
23547+ movq_cfi r9, R9+8
23548+ movq_cfi r10, R10+8
23549+ movq_cfi r11, R11+8
23550+ movq_cfi rbx, RBX+8
23551+ movq_cfi rbp, RBP+8
23552+ movq_cfi r12, R12+8
23553+ movq_cfi r13, R13+8
23554+ movq_cfi r14, R14+8
23555+ movq_cfi r15, R15+8
23556+ movl $1,%ebx
23557+ movl $MSR_GS_BASE,%ecx
23558+ rdmsr
23559+ testl %edx,%edx
23560+ js 1f /* negative -> in kernel */
23561+ SWAPGS
23562+ xorl %ebx,%ebx
23563+1: pax_enter_kernel_nmi
23564+ pax_force_retaddr
23565+ ret
23566+ CFI_ENDPROC
23567+ENDPROC(save_paranoid_nmi)
23568
23569 /*
23570 * A newly forked process directly context switches into this address.
23571@@ -266,7 +736,7 @@ ENTRY(ret_from_fork)
23572
23573 RESTORE_REST
23574
23575- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23576+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23577 jz 1f
23578
23579 /*
23580@@ -279,15 +749,13 @@ ENTRY(ret_from_fork)
23581 jmp int_ret_from_sys_call
23582
23583 1:
23584- subq $REST_SKIP, %rsp # leave space for volatiles
23585- CFI_ADJUST_CFA_OFFSET REST_SKIP
23586 movq %rbp, %rdi
23587 call *%rbx
23588 movl $0, RAX(%rsp)
23589 RESTORE_REST
23590 jmp int_ret_from_sys_call
23591 CFI_ENDPROC
23592-END(ret_from_fork)
23593+ENDPROC(ret_from_fork)
23594
23595 /*
23596 * System call entry. Up to 6 arguments in registers are supported.
23597@@ -324,7 +792,7 @@ END(ret_from_fork)
23598 ENTRY(system_call)
23599 CFI_STARTPROC simple
23600 CFI_SIGNAL_FRAME
23601- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
23602+ CFI_DEF_CFA rsp,0
23603 CFI_REGISTER rip,rcx
23604 /*CFI_REGISTER rflags,r11*/
23605 SWAPGS_UNSAFE_STACK
23606@@ -337,16 +805,23 @@ GLOBAL(system_call_after_swapgs)
23607
23608 movq %rsp,PER_CPU_VAR(old_rsp)
23609 movq PER_CPU_VAR(kernel_stack),%rsp
23610+ SAVE_ARGS 8*6, 0, rax_enosys=1
23611+ pax_enter_kernel_user
23612+
23613+#ifdef CONFIG_PAX_RANDKSTACK
23614+ pax_erase_kstack
23615+#endif
23616+
23617 /*
23618 * No need to follow this irqs off/on section - it's straight
23619 * and short:
23620 */
23621 ENABLE_INTERRUPTS(CLBR_NONE)
23622- SAVE_ARGS 8, 0, rax_enosys=1
23623 movq_cfi rax,(ORIG_RAX-ARGOFFSET)
23624 movq %rcx,RIP-ARGOFFSET(%rsp)
23625 CFI_REL_OFFSET rip,RIP-ARGOFFSET
23626- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23627+ GET_THREAD_INFO(%rcx)
23628+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23629 jnz tracesys
23630 system_call_fastpath:
23631 #if __SYSCALL_MASK == ~0
23632@@ -376,10 +851,13 @@ ret_from_sys_call:
23633 * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
23634 * very bad.
23635 */
23636- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23637+ GET_THREAD_INFO(%rcx)
23638+ testl $_TIF_ALLWORK_MASK,TI_flags(%rcx)
23639 jnz int_ret_from_sys_call_fixup /* Go the the slow path */
23640
23641 CFI_REMEMBER_STATE
23642+ pax_exit_kernel_user
23643+ pax_erase_kstack
23644 /*
23645 * sysretq will re-enable interrupts:
23646 */
23647@@ -399,12 +877,15 @@ int_ret_from_sys_call_fixup:
23648
23649 /* Do syscall tracing */
23650 tracesys:
23651- leaq -REST_SKIP(%rsp), %rdi
23652+ movq %rsp, %rdi
23653 movq $AUDIT_ARCH_X86_64, %rsi
23654 call syscall_trace_enter_phase1
23655 test %rax, %rax
23656 jnz tracesys_phase2 /* if needed, run the slow path */
23657- LOAD_ARGS 0 /* else restore clobbered regs */
23658+
23659+ pax_erase_kstack
23660+
23661+ LOAD_ARGS /* else restore clobbered regs */
23662 jmp system_call_fastpath /* and return to the fast path */
23663
23664 tracesys_phase2:
23665@@ -415,12 +896,14 @@ tracesys_phase2:
23666 movq %rax,%rdx
23667 call syscall_trace_enter_phase2
23668
23669+ pax_erase_kstack
23670+
23671 /*
23672 * Reload arg registers from stack in case ptrace changed them.
23673 * We don't reload %rax because syscall_trace_entry_phase2() returned
23674 * the value it wants us to use in the table lookup.
23675 */
23676- LOAD_ARGS ARGOFFSET, 1
23677+ LOAD_ARGS 1
23678 RESTORE_REST
23679 #if __SYSCALL_MASK == ~0
23680 cmpq $__NR_syscall_max,%rax
23681@@ -451,7 +934,9 @@ GLOBAL(int_with_check)
23682 andl %edi,%edx
23683 jnz int_careful
23684 andl $~TS_COMPAT,TI_status(%rcx)
23685- jmp retint_swapgs
23686+ pax_exit_kernel_user
23687+ pax_erase_kstack
23688+ jmp retint_swapgs_pax
23689
23690 /* Either reschedule or signal or syscall exit tracking needed. */
23691 /* First do a reschedule test. */
23692@@ -497,7 +982,7 @@ int_restore_rest:
23693 TRACE_IRQS_OFF
23694 jmp int_with_check
23695 CFI_ENDPROC
23696-END(system_call)
23697+ENDPROC(system_call)
23698
23699 .macro FORK_LIKE func
23700 ENTRY(stub_\func)
23701@@ -510,9 +995,10 @@ ENTRY(stub_\func)
23702 DEFAULT_FRAME 0 8 /* offset 8: return address */
23703 call sys_\func
23704 RESTORE_TOP_OF_STACK %r11, 8
23705- ret $REST_SKIP /* pop extended registers */
23706+ pax_force_retaddr
23707+ ret
23708 CFI_ENDPROC
23709-END(stub_\func)
23710+ENDPROC(stub_\func)
23711 .endm
23712
23713 .macro FIXED_FRAME label,func
23714@@ -522,9 +1008,10 @@ ENTRY(\label)
23715 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
23716 call \func
23717 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
23718+ pax_force_retaddr
23719 ret
23720 CFI_ENDPROC
23721-END(\label)
23722+ENDPROC(\label)
23723 .endm
23724
23725 FORK_LIKE clone
23726@@ -543,7 +1030,7 @@ ENTRY(stub_execve)
23727 RESTORE_REST
23728 jmp int_ret_from_sys_call
23729 CFI_ENDPROC
23730-END(stub_execve)
23731+ENDPROC(stub_execve)
23732
23733 ENTRY(stub_execveat)
23734 CFI_STARTPROC
23735@@ -557,7 +1044,7 @@ ENTRY(stub_execveat)
23736 RESTORE_REST
23737 jmp int_ret_from_sys_call
23738 CFI_ENDPROC
23739-END(stub_execveat)
23740+ENDPROC(stub_execveat)
23741
23742 /*
23743 * sigreturn is special because it needs to restore all registers on return.
23744@@ -574,7 +1061,7 @@ ENTRY(stub_rt_sigreturn)
23745 RESTORE_REST
23746 jmp int_ret_from_sys_call
23747 CFI_ENDPROC
23748-END(stub_rt_sigreturn)
23749+ENDPROC(stub_rt_sigreturn)
23750
23751 #ifdef CONFIG_X86_X32_ABI
23752 ENTRY(stub_x32_rt_sigreturn)
23753@@ -588,7 +1075,7 @@ ENTRY(stub_x32_rt_sigreturn)
23754 RESTORE_REST
23755 jmp int_ret_from_sys_call
23756 CFI_ENDPROC
23757-END(stub_x32_rt_sigreturn)
23758+ENDPROC(stub_x32_rt_sigreturn)
23759
23760 ENTRY(stub_x32_execve)
23761 CFI_STARTPROC
23762@@ -602,7 +1089,7 @@ ENTRY(stub_x32_execve)
23763 RESTORE_REST
23764 jmp int_ret_from_sys_call
23765 CFI_ENDPROC
23766-END(stub_x32_execve)
23767+ENDPROC(stub_x32_execve)
23768
23769 ENTRY(stub_x32_execveat)
23770 CFI_STARTPROC
23771@@ -616,7 +1103,7 @@ ENTRY(stub_x32_execveat)
23772 RESTORE_REST
23773 jmp int_ret_from_sys_call
23774 CFI_ENDPROC
23775-END(stub_x32_execveat)
23776+ENDPROC(stub_x32_execveat)
23777
23778 #endif
23779
23780@@ -653,7 +1140,7 @@ vector=vector+1
23781 2: jmp common_interrupt
23782 .endr
23783 CFI_ENDPROC
23784-END(irq_entries_start)
23785+ENDPROC(irq_entries_start)
23786
23787 .previous
23788 END(interrupt)
23789@@ -670,28 +1157,29 @@ END(interrupt)
23790 /* 0(%rsp): ~(interrupt number) */
23791 .macro interrupt func
23792 /* reserve pt_regs for scratch regs and rbp */
23793- subq $ORIG_RAX-RBP, %rsp
23794- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
23795+ subq $ORIG_RAX, %rsp
23796+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
23797 cld
23798- /* start from rbp in pt_regs and jump over */
23799- movq_cfi rdi, (RDI-RBP)
23800- movq_cfi rsi, (RSI-RBP)
23801- movq_cfi rdx, (RDX-RBP)
23802- movq_cfi rcx, (RCX-RBP)
23803- movq_cfi rax, (RAX-RBP)
23804- movq_cfi r8, (R8-RBP)
23805- movq_cfi r9, (R9-RBP)
23806- movq_cfi r10, (R10-RBP)
23807- movq_cfi r11, (R11-RBP)
23808+ /* start from r15 in pt_regs and jump over */
23809+ movq_cfi rdi, RDI
23810+ movq_cfi rsi, RSI
23811+ movq_cfi rdx, RDX
23812+ movq_cfi rcx, RCX
23813+ movq_cfi rax, RAX
23814+ movq_cfi r8, R8
23815+ movq_cfi r9, R9
23816+ movq_cfi r10, R10
23817+ movq_cfi r11, R11
23818+ movq_cfi r12, R12
23819
23820 /* Save rbp so that we can unwind from get_irq_regs() */
23821- movq_cfi rbp, 0
23822+ movq_cfi rbp, RBP
23823
23824 /* Save previous stack value */
23825 movq %rsp, %rsi
23826
23827- leaq -RBP(%rsp),%rdi /* arg1 for handler */
23828- testl $3, CS-RBP(%rsi)
23829+ movq %rsp,%rdi /* arg1 for handler */
23830+ testb $3, CS(%rsi)
23831 je 1f
23832 SWAPGS
23833 /*
23834@@ -711,6 +1199,18 @@ END(interrupt)
23835 0x06 /* DW_OP_deref */, \
23836 0x08 /* DW_OP_const1u */, SS+8-RBP, \
23837 0x22 /* DW_OP_plus */
23838+
23839+#ifdef CONFIG_PAX_MEMORY_UDEREF
23840+ testb $3, CS(%rdi)
23841+ jnz 1f
23842+ pax_enter_kernel
23843+ jmp 2f
23844+1: pax_enter_kernel_user
23845+2:
23846+#else
23847+ pax_enter_kernel
23848+#endif
23849+
23850 /* We entered an interrupt context - irqs are off: */
23851 TRACE_IRQS_OFF
23852
23853@@ -735,14 +1235,14 @@ ret_from_intr:
23854
23855 /* Restore saved previous stack */
23856 popq %rsi
23857- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
23858- leaq ARGOFFSET-RBP(%rsi), %rsp
23859+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
23860+ movq %rsi, %rsp
23861 CFI_DEF_CFA_REGISTER rsp
23862- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
23863+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
23864
23865 exit_intr:
23866 GET_THREAD_INFO(%rcx)
23867- testl $3,CS-ARGOFFSET(%rsp)
23868+ testb $3,CS-ARGOFFSET(%rsp)
23869 je retint_kernel
23870
23871 /* Interrupt came from user space */
23872@@ -764,14 +1264,16 @@ retint_swapgs: /* return to user-space */
23873 * The iretq could re-enable interrupts:
23874 */
23875 DISABLE_INTERRUPTS(CLBR_ANY)
23876+ pax_exit_kernel_user
23877+retint_swapgs_pax:
23878 TRACE_IRQS_IRETQ
23879
23880 /*
23881 * Try to use SYSRET instead of IRET if we're returning to
23882 * a completely clean 64-bit userspace context.
23883 */
23884- movq (RCX-R11)(%rsp), %rcx
23885- cmpq %rcx,(RIP-R11)(%rsp) /* RCX == RIP */
23886+ movq (RCX-ARGOFFSET)(%rsp), %rcx
23887+ cmpq %rcx,(RIP-ARGOFFSET)(%rsp) /* RCX == RIP */
23888 jne opportunistic_sysret_failed
23889
23890 /*
23891@@ -792,7 +1294,7 @@ retint_swapgs: /* return to user-space */
23892 shr $__VIRTUAL_MASK_SHIFT, %rcx
23893 jnz opportunistic_sysret_failed
23894
23895- cmpq $__USER_CS,(CS-R11)(%rsp) /* CS must match SYSRET */
23896+ cmpq $__USER_CS,(CS-ARGOFFSET)(%rsp) /* CS must match SYSRET */
23897 jne opportunistic_sysret_failed
23898
23899 movq (R11-ARGOFFSET)(%rsp), %r11
23900@@ -838,6 +1340,27 @@ opportunistic_sysret_failed:
23901
23902 retint_restore_args: /* return to kernel space */
23903 DISABLE_INTERRUPTS(CLBR_ANY)
23904+ pax_exit_kernel
23905+
23906+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
23907+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
23908+ * namely calling EFI runtime services with a phys mapping. We're
23909+ * starting off with NOPs and patch in the real instrumentation
23910+ * (BTS/OR) before starting any userland process; even before starting
23911+ * up the APs.
23912+ */
23913+ .pushsection .altinstr_replacement, "a"
23914+ 601: pax_force_retaddr (RIP-ARGOFFSET)
23915+ 602:
23916+ .popsection
23917+ 603: .fill 602b-601b, 1, 0x90
23918+ .pushsection .altinstructions, "a"
23919+ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
23920+ .popsection
23921+#else
23922+ pax_force_retaddr (RIP-ARGOFFSET)
23923+#endif
23924+
23925 /*
23926 * The iretq could re-enable interrupts:
23927 */
23928@@ -875,15 +1398,15 @@ native_irq_return_ldt:
23929 SWAPGS
23930 movq PER_CPU_VAR(espfix_waddr),%rdi
23931 movq %rax,(0*8)(%rdi) /* RAX */
23932- movq (2*8)(%rsp),%rax /* RIP */
23933+ movq (2*8 + RIP-RIP)(%rsp),%rax /* RIP */
23934 movq %rax,(1*8)(%rdi)
23935- movq (3*8)(%rsp),%rax /* CS */
23936+ movq (2*8 + CS-RIP)(%rsp),%rax /* CS */
23937 movq %rax,(2*8)(%rdi)
23938- movq (4*8)(%rsp),%rax /* RFLAGS */
23939+ movq (2*8 + EFLAGS-RIP)(%rsp),%rax /* RFLAGS */
23940 movq %rax,(3*8)(%rdi)
23941- movq (6*8)(%rsp),%rax /* SS */
23942+ movq (2*8 + SS-RIP)(%rsp),%rax /* SS */
23943 movq %rax,(5*8)(%rdi)
23944- movq (5*8)(%rsp),%rax /* RSP */
23945+ movq (2*8 + RSP-RIP)(%rsp),%rax /* RSP */
23946 movq %rax,(4*8)(%rdi)
23947 andl $0xffff0000,%eax
23948 popq_cfi %rdi
23949@@ -937,7 +1460,7 @@ ENTRY(retint_kernel)
23950 jmp exit_intr
23951 #endif
23952 CFI_ENDPROC
23953-END(common_interrupt)
23954+ENDPROC(common_interrupt)
23955
23956 /*
23957 * APIC interrupts.
23958@@ -951,7 +1474,7 @@ ENTRY(\sym)
23959 interrupt \do_sym
23960 jmp ret_from_intr
23961 CFI_ENDPROC
23962-END(\sym)
23963+ENDPROC(\sym)
23964 .endm
23965
23966 #ifdef CONFIG_TRACING
23967@@ -1024,7 +1547,7 @@ apicinterrupt IRQ_WORK_VECTOR \
23968 /*
23969 * Exception entry points.
23970 */
23971-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
23972+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
23973
23974 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
23975 ENTRY(\sym)
23976@@ -1080,6 +1603,12 @@ ENTRY(\sym)
23977 .endif
23978
23979 .if \shift_ist != -1
23980+#ifdef CONFIG_SMP
23981+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
23982+ lea init_tss(%r13), %r13
23983+#else
23984+ lea init_tss(%rip), %r13
23985+#endif
23986 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
23987 .endif
23988
23989@@ -1126,7 +1655,7 @@ ENTRY(\sym)
23990 .endif
23991
23992 CFI_ENDPROC
23993-END(\sym)
23994+ENDPROC(\sym)
23995 .endm
23996
23997 #ifdef CONFIG_TRACING
23998@@ -1167,9 +1696,10 @@ gs_change:
23999 2: mfence /* workaround */
24000 SWAPGS
24001 popfq_cfi
24002+ pax_force_retaddr
24003 ret
24004 CFI_ENDPROC
24005-END(native_load_gs_index)
24006+ENDPROC(native_load_gs_index)
24007
24008 _ASM_EXTABLE(gs_change,bad_gs)
24009 .section .fixup,"ax"
24010@@ -1197,9 +1727,10 @@ ENTRY(do_softirq_own_stack)
24011 CFI_DEF_CFA_REGISTER rsp
24012 CFI_ADJUST_CFA_OFFSET -8
24013 decl PER_CPU_VAR(irq_count)
24014+ pax_force_retaddr
24015 ret
24016 CFI_ENDPROC
24017-END(do_softirq_own_stack)
24018+ENDPROC(do_softirq_own_stack)
24019
24020 #ifdef CONFIG_XEN
24021 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
24022@@ -1240,7 +1771,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
24023 #endif
24024 jmp error_exit
24025 CFI_ENDPROC
24026-END(xen_do_hypervisor_callback)
24027+ENDPROC(xen_do_hypervisor_callback)
24028
24029 /*
24030 * Hypervisor uses this for application faults while it executes.
24031@@ -1299,7 +1830,7 @@ ENTRY(xen_failsafe_callback)
24032 SAVE_ALL
24033 jmp error_exit
24034 CFI_ENDPROC
24035-END(xen_failsafe_callback)
24036+ENDPROC(xen_failsafe_callback)
24037
24038 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24039 xen_hvm_callback_vector xen_evtchn_do_upcall
24040@@ -1344,18 +1875,25 @@ ENTRY(paranoid_exit)
24041 DEFAULT_FRAME
24042 DISABLE_INTERRUPTS(CLBR_NONE)
24043 TRACE_IRQS_OFF_DEBUG
24044- testl %ebx,%ebx /* swapgs needed? */
24045+ testl $1,%ebx /* swapgs needed? */
24046 jnz paranoid_restore
24047+#ifdef CONFIG_PAX_MEMORY_UDEREF
24048+ pax_exit_kernel_user
24049+#else
24050+ pax_exit_kernel
24051+#endif
24052 TRACE_IRQS_IRETQ 0
24053 SWAPGS_UNSAFE_STACK
24054 RESTORE_ALL 8
24055 INTERRUPT_RETURN
24056 paranoid_restore:
24057+ pax_exit_kernel
24058 TRACE_IRQS_IRETQ_DEBUG 0
24059 RESTORE_ALL 8
24060+ pax_force_retaddr_bts
24061 INTERRUPT_RETURN
24062 CFI_ENDPROC
24063-END(paranoid_exit)
24064+ENDPROC(paranoid_exit)
24065
24066 /*
24067 * Exception entry point. This expects an error code/orig_rax on the stack.
24068@@ -1382,12 +1920,23 @@ ENTRY(error_entry)
24069 movq %r14, R14+8(%rsp)
24070 movq %r15, R15+8(%rsp)
24071 xorl %ebx,%ebx
24072- testl $3,CS+8(%rsp)
24073+ testb $3,CS+8(%rsp)
24074 je error_kernelspace
24075 error_swapgs:
24076 SWAPGS
24077 error_sti:
24078+#ifdef CONFIG_PAX_MEMORY_UDEREF
24079+ testb $3, CS+8(%rsp)
24080+ jnz 1f
24081+ pax_enter_kernel
24082+ jmp 2f
24083+1: pax_enter_kernel_user
24084+2:
24085+#else
24086+ pax_enter_kernel
24087+#endif
24088 TRACE_IRQS_OFF
24089+ pax_force_retaddr
24090 ret
24091
24092 /*
24093@@ -1422,7 +1971,7 @@ error_bad_iret:
24094 decl %ebx /* Return to usergs */
24095 jmp error_sti
24096 CFI_ENDPROC
24097-END(error_entry)
24098+ENDPROC(error_entry)
24099
24100
24101 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24102@@ -1433,7 +1982,7 @@ ENTRY(error_exit)
24103 DISABLE_INTERRUPTS(CLBR_NONE)
24104 TRACE_IRQS_OFF
24105 GET_THREAD_INFO(%rcx)
24106- testl %eax,%eax
24107+ testl $1,%eax
24108 jne retint_kernel
24109 LOCKDEP_SYS_EXIT_IRQ
24110 movl TI_flags(%rcx),%edx
24111@@ -1442,7 +1991,7 @@ ENTRY(error_exit)
24112 jnz retint_careful
24113 jmp retint_swapgs
24114 CFI_ENDPROC
24115-END(error_exit)
24116+ENDPROC(error_exit)
24117
24118 /*
24119 * Test if a given stack is an NMI stack or not.
24120@@ -1500,9 +2049,11 @@ ENTRY(nmi)
24121 * If %cs was not the kernel segment, then the NMI triggered in user
24122 * space, which means it is definitely not nested.
24123 */
24124+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24125+ je 1f
24126 cmpl $__KERNEL_CS, 16(%rsp)
24127 jne first_nmi
24128-
24129+1:
24130 /*
24131 * Check the special variable on the stack to see if NMIs are
24132 * executing.
24133@@ -1536,8 +2087,7 @@ nested_nmi:
24134
24135 1:
24136 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
24137- leaq -1*8(%rsp), %rdx
24138- movq %rdx, %rsp
24139+ subq $8, %rsp
24140 CFI_ADJUST_CFA_OFFSET 1*8
24141 leaq -10*8(%rsp), %rdx
24142 pushq_cfi $__KERNEL_DS
24143@@ -1555,6 +2105,7 @@ nested_nmi_out:
24144 CFI_RESTORE rdx
24145
24146 /* No need to check faults here */
24147+# pax_force_retaddr_bts
24148 INTERRUPT_RETURN
24149
24150 CFI_RESTORE_STATE
24151@@ -1651,13 +2202,13 @@ end_repeat_nmi:
24152 subq $ORIG_RAX-R15, %rsp
24153 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24154 /*
24155- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24156+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24157 * as we should not be calling schedule in NMI context.
24158 * Even with normal interrupts enabled. An NMI should not be
24159 * setting NEED_RESCHED or anything that normal interrupts and
24160 * exceptions might do.
24161 */
24162- call save_paranoid
24163+ call save_paranoid_nmi
24164 DEFAULT_FRAME 0
24165
24166 /*
24167@@ -1667,9 +2218,9 @@ end_repeat_nmi:
24168 * NMI itself takes a page fault, the page fault that was preempted
24169 * will read the information from the NMI page fault and not the
24170 * origin fault. Save it off and restore it if it changes.
24171- * Use the r12 callee-saved register.
24172+ * Use the r13 callee-saved register.
24173 */
24174- movq %cr2, %r12
24175+ movq %cr2, %r13
24176
24177 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24178 movq %rsp,%rdi
24179@@ -1678,29 +2229,34 @@ end_repeat_nmi:
24180
24181 /* Did the NMI take a page fault? Restore cr2 if it did */
24182 movq %cr2, %rcx
24183- cmpq %rcx, %r12
24184+ cmpq %rcx, %r13
24185 je 1f
24186- movq %r12, %cr2
24187+ movq %r13, %cr2
24188 1:
24189
24190- testl %ebx,%ebx /* swapgs needed? */
24191+ testl $1,%ebx /* swapgs needed? */
24192 jnz nmi_restore
24193 nmi_swapgs:
24194 SWAPGS_UNSAFE_STACK
24195 nmi_restore:
24196+ pax_exit_kernel_nmi
24197 /* Pop the extra iret frame at once */
24198 RESTORE_ALL 6*8
24199+ testb $3, 8(%rsp)
24200+ jnz 1f
24201+ pax_force_retaddr_bts
24202+1:
24203
24204 /* Clear the NMI executing stack variable */
24205 movq $0, 5*8(%rsp)
24206 jmp irq_return
24207 CFI_ENDPROC
24208-END(nmi)
24209+ENDPROC(nmi)
24210
24211 ENTRY(ignore_sysret)
24212 CFI_STARTPROC
24213 mov $-ENOSYS,%eax
24214 sysret
24215 CFI_ENDPROC
24216-END(ignore_sysret)
24217+ENDPROC(ignore_sysret)
24218
24219diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24220index f5d0730..5bce89c 100644
24221--- a/arch/x86/kernel/espfix_64.c
24222+++ b/arch/x86/kernel/espfix_64.c
24223@@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex);
24224 #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
24225 static void *espfix_pages[ESPFIX_MAX_PAGES];
24226
24227-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
24228- __aligned(PAGE_SIZE);
24229+static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata;
24230
24231 static unsigned int page_random, slot_random;
24232
24233@@ -122,11 +121,17 @@ static void init_espfix_random(void)
24234 void __init init_espfix_bsp(void)
24235 {
24236 pgd_t *pgd_p;
24237+ unsigned long index = pgd_index(ESPFIX_BASE_ADDR);
24238
24239 /* Install the espfix pud into the kernel page directory */
24240- pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
24241+ pgd_p = &init_level4_pgt[index];
24242 pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
24243
24244+#ifdef CONFIG_PAX_PER_CPU_PGD
24245+ clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1);
24246+ clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1);
24247+#endif
24248+
24249 /* Randomize the locations */
24250 init_espfix_random();
24251
24252@@ -194,7 +199,7 @@ void init_espfix_ap(void)
24253 set_pte(&pte_p[n*PTE_STRIDE], pte);
24254
24255 /* Job is done for this CPU and any CPU which shares this page */
24256- ACCESS_ONCE(espfix_pages[page]) = stack_page;
24257+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
24258
24259 unlock_done:
24260 mutex_unlock(&espfix_init_mutex);
24261diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24262index 8b7b0a5..02219db 100644
24263--- a/arch/x86/kernel/ftrace.c
24264+++ b/arch/x86/kernel/ftrace.c
24265@@ -89,7 +89,7 @@ static unsigned long text_ip_addr(unsigned long ip)
24266 * kernel identity mapping to modify code.
24267 */
24268 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24269- ip = (unsigned long)__va(__pa_symbol(ip));
24270+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24271
24272 return ip;
24273 }
24274@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24275 {
24276 unsigned char replaced[MCOUNT_INSN_SIZE];
24277
24278+ ip = ktla_ktva(ip);
24279+
24280 /*
24281 * Note: Due to modules and __init, code can
24282 * disappear and change, we need to protect against faulting
24283@@ -230,7 +232,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
24284 unsigned char old[MCOUNT_INSN_SIZE];
24285 int ret;
24286
24287- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
24288+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
24289
24290 ftrace_update_func = ip;
24291 /* Make sure the breakpoints see the ftrace_update_func update */
24292@@ -311,7 +313,7 @@ static int add_break(unsigned long ip, const char *old)
24293 unsigned char replaced[MCOUNT_INSN_SIZE];
24294 unsigned char brk = BREAKPOINT_INSTRUCTION;
24295
24296- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
24297+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
24298 return -EFAULT;
24299
24300 /* Make sure it is what we expect it to be */
24301@@ -670,11 +672,11 @@ static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
24302 /* Module allocation simplifies allocating memory for code */
24303 static inline void *alloc_tramp(unsigned long size)
24304 {
24305- return module_alloc(size);
24306+ return module_alloc_exec(size);
24307 }
24308 static inline void tramp_free(void *tramp)
24309 {
24310- module_memfree(tramp);
24311+ module_memfree_exec(tramp);
24312 }
24313 #else
24314 /* Trampolines can only be created if modules are supported */
24315@@ -753,7 +755,9 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
24316 *tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *);
24317
24318 /* Copy ftrace_caller onto the trampoline memory */
24319+ pax_open_kernel();
24320 ret = probe_kernel_read(trampoline, (void *)start_offset, size);
24321+ pax_close_kernel();
24322 if (WARN_ON(ret < 0)) {
24323 tramp_free(trampoline);
24324 return 0;
24325@@ -763,6 +767,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
24326
24327 /* The trampoline ends with a jmp to ftrace_return */
24328 jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_return);
24329+ pax_open_kernel();
24330 memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE);
24331
24332 /*
24333@@ -775,6 +780,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
24334
24335 ptr = (unsigned long *)(trampoline + size + MCOUNT_INSN_SIZE);
24336 *ptr = (unsigned long)ops;
24337+ pax_close_kernel();
24338
24339 op_offset -= start_offset;
24340 memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);
24341@@ -792,7 +798,9 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
24342 op_ptr.offset = offset;
24343
24344 /* put in the new offset to the ftrace_ops */
24345+ pax_open_kernel();
24346 memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);
24347+ pax_close_kernel();
24348
24349 /* ALLOC_TRAMP flags lets us know we created it */
24350 ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
24351diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
24352index b111ab5..3d419ea 100644
24353--- a/arch/x86/kernel/head64.c
24354+++ b/arch/x86/kernel/head64.c
24355@@ -68,12 +68,12 @@ again:
24356 pgd = *pgd_p;
24357
24358 /*
24359- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
24360- * critical -- __PAGE_OFFSET would point us back into the dynamic
24361+ * The use of __early_va rather than __va here is critical:
24362+ * __va would point us back into the dynamic
24363 * range and we might end up looping forever...
24364 */
24365 if (pgd)
24366- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24367+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
24368 else {
24369 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24370 reset_early_page_tables();
24371@@ -83,13 +83,13 @@ again:
24372 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
24373 for (i = 0; i < PTRS_PER_PUD; i++)
24374 pud_p[i] = 0;
24375- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24376+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
24377 }
24378 pud_p += pud_index(address);
24379 pud = *pud_p;
24380
24381 if (pud)
24382- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24383+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
24384 else {
24385 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24386 reset_early_page_tables();
24387@@ -99,7 +99,7 @@ again:
24388 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24389 for (i = 0; i < PTRS_PER_PMD; i++)
24390 pmd_p[i] = 0;
24391- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24392+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24393 }
24394 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24395 pmd_p[pmd_index(address)] = pmd;
24396@@ -180,7 +180,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
24397 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
24398 early_printk("Kernel alive\n");
24399
24400- clear_page(init_level4_pgt);
24401 /* set init_level4_pgt kernel high mapping*/
24402 init_level4_pgt[511] = early_level4_pgt[511];
24403
24404diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24405index 30a2aa3..d62e1dd 100644
24406--- a/arch/x86/kernel/head_32.S
24407+++ b/arch/x86/kernel/head_32.S
24408@@ -26,6 +26,12 @@
24409 /* Physical address */
24410 #define pa(X) ((X) - __PAGE_OFFSET)
24411
24412+#ifdef CONFIG_PAX_KERNEXEC
24413+#define ta(X) (X)
24414+#else
24415+#define ta(X) ((X) - __PAGE_OFFSET)
24416+#endif
24417+
24418 /*
24419 * References to members of the new_cpu_data structure.
24420 */
24421@@ -55,11 +61,7 @@
24422 * and small than max_low_pfn, otherwise will waste some page table entries
24423 */
24424
24425-#if PTRS_PER_PMD > 1
24426-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24427-#else
24428-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24429-#endif
24430+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24431
24432 /* Number of possible pages in the lowmem region */
24433 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
24434@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24435 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24436
24437 /*
24438+ * Real beginning of normal "text" segment
24439+ */
24440+ENTRY(stext)
24441+ENTRY(_stext)
24442+
24443+/*
24444 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24445 * %esi points to the real-mode code as a 32-bit pointer.
24446 * CS and DS must be 4 GB flat segments, but we don't depend on
24447@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24448 * can.
24449 */
24450 __HEAD
24451+
24452+#ifdef CONFIG_PAX_KERNEXEC
24453+ jmp startup_32
24454+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24455+.fill PAGE_SIZE-5,1,0xcc
24456+#endif
24457+
24458 ENTRY(startup_32)
24459 movl pa(stack_start),%ecx
24460
24461@@ -106,6 +121,59 @@ ENTRY(startup_32)
24462 2:
24463 leal -__PAGE_OFFSET(%ecx),%esp
24464
24465+#ifdef CONFIG_SMP
24466+ movl $pa(cpu_gdt_table),%edi
24467+ movl $__per_cpu_load,%eax
24468+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24469+ rorl $16,%eax
24470+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24471+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24472+ movl $__per_cpu_end - 1,%eax
24473+ subl $__per_cpu_start,%eax
24474+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24475+#endif
24476+
24477+#ifdef CONFIG_PAX_MEMORY_UDEREF
24478+ movl $NR_CPUS,%ecx
24479+ movl $pa(cpu_gdt_table),%edi
24480+1:
24481+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24482+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24483+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24484+ addl $PAGE_SIZE_asm,%edi
24485+ loop 1b
24486+#endif
24487+
24488+#ifdef CONFIG_PAX_KERNEXEC
24489+ movl $pa(boot_gdt),%edi
24490+ movl $__LOAD_PHYSICAL_ADDR,%eax
24491+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24492+ rorl $16,%eax
24493+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24494+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24495+ rorl $16,%eax
24496+
24497+ ljmp $(__BOOT_CS),$1f
24498+1:
24499+
24500+ movl $NR_CPUS,%ecx
24501+ movl $pa(cpu_gdt_table),%edi
24502+ addl $__PAGE_OFFSET,%eax
24503+1:
24504+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24505+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24506+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24507+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24508+ rorl $16,%eax
24509+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24510+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24511+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24512+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24513+ rorl $16,%eax
24514+ addl $PAGE_SIZE_asm,%edi
24515+ loop 1b
24516+#endif
24517+
24518 /*
24519 * Clear BSS first so that there are no surprises...
24520 */
24521@@ -201,8 +269,11 @@ ENTRY(startup_32)
24522 movl %eax, pa(max_pfn_mapped)
24523
24524 /* Do early initialization of the fixmap area */
24525- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24526- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24527+#ifdef CONFIG_COMPAT_VDSO
24528+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24529+#else
24530+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24531+#endif
24532 #else /* Not PAE */
24533
24534 page_pde_offset = (__PAGE_OFFSET >> 20);
24535@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24536 movl %eax, pa(max_pfn_mapped)
24537
24538 /* Do early initialization of the fixmap area */
24539- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24540- movl %eax,pa(initial_page_table+0xffc)
24541+#ifdef CONFIG_COMPAT_VDSO
24542+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24543+#else
24544+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24545+#endif
24546 #endif
24547
24548 #ifdef CONFIG_PARAVIRT
24549@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24550 cmpl $num_subarch_entries, %eax
24551 jae bad_subarch
24552
24553- movl pa(subarch_entries)(,%eax,4), %eax
24554- subl $__PAGE_OFFSET, %eax
24555- jmp *%eax
24556+ jmp *pa(subarch_entries)(,%eax,4)
24557
24558 bad_subarch:
24559 WEAK(lguest_entry)
24560@@ -261,10 +333,10 @@ WEAK(xen_entry)
24561 __INITDATA
24562
24563 subarch_entries:
24564- .long default_entry /* normal x86/PC */
24565- .long lguest_entry /* lguest hypervisor */
24566- .long xen_entry /* Xen hypervisor */
24567- .long default_entry /* Moorestown MID */
24568+ .long ta(default_entry) /* normal x86/PC */
24569+ .long ta(lguest_entry) /* lguest hypervisor */
24570+ .long ta(xen_entry) /* Xen hypervisor */
24571+ .long ta(default_entry) /* Moorestown MID */
24572 num_subarch_entries = (. - subarch_entries) / 4
24573 .previous
24574 #else
24575@@ -354,6 +426,7 @@ default_entry:
24576 movl pa(mmu_cr4_features),%eax
24577 movl %eax,%cr4
24578
24579+#ifdef CONFIG_X86_PAE
24580 testb $X86_CR4_PAE, %al # check if PAE is enabled
24581 jz enable_paging
24582
24583@@ -382,6 +455,9 @@ default_entry:
24584 /* Make changes effective */
24585 wrmsr
24586
24587+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24588+#endif
24589+
24590 enable_paging:
24591
24592 /*
24593@@ -449,14 +525,20 @@ is486:
24594 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24595 movl %eax,%ss # after changing gdt.
24596
24597- movl $(__USER_DS),%eax # DS/ES contains default USER segment
24598+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24599 movl %eax,%ds
24600 movl %eax,%es
24601
24602 movl $(__KERNEL_PERCPU), %eax
24603 movl %eax,%fs # set this cpu's percpu
24604
24605+#ifdef CONFIG_CC_STACKPROTECTOR
24606 movl $(__KERNEL_STACK_CANARY),%eax
24607+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24608+ movl $(__USER_DS),%eax
24609+#else
24610+ xorl %eax,%eax
24611+#endif
24612 movl %eax,%gs
24613
24614 xorl %eax,%eax # Clear LDT
24615@@ -513,8 +595,11 @@ setup_once:
24616 * relocation. Manually set base address in stack canary
24617 * segment descriptor.
24618 */
24619- movl $gdt_page,%eax
24620+ movl $cpu_gdt_table,%eax
24621 movl $stack_canary,%ecx
24622+#ifdef CONFIG_SMP
24623+ addl $__per_cpu_load,%ecx
24624+#endif
24625 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24626 shrl $16, %ecx
24627 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24628@@ -551,7 +636,7 @@ early_idt_handler_common:
24629 cmpl $2,(%esp) # X86_TRAP_NMI
24630 je is_nmi # Ignore NMI
24631
24632- cmpl $2,%ss:early_recursion_flag
24633+ cmpl $1,%ss:early_recursion_flag
24634 je hlt_loop
24635 incl %ss:early_recursion_flag
24636
24637@@ -589,8 +674,8 @@ early_idt_handler_common:
24638 pushl (20+6*4)(%esp) /* trapno */
24639 pushl $fault_msg
24640 call printk
24641-#endif
24642 call dump_stack
24643+#endif
24644 hlt_loop:
24645 hlt
24646 jmp hlt_loop
24647@@ -610,8 +695,11 @@ ENDPROC(early_idt_handler_common)
24648 /* This is the default interrupt "handler" :-) */
24649 ALIGN
24650 ignore_int:
24651- cld
24652 #ifdef CONFIG_PRINTK
24653+ cmpl $2,%ss:early_recursion_flag
24654+ je hlt_loop
24655+ incl %ss:early_recursion_flag
24656+ cld
24657 pushl %eax
24658 pushl %ecx
24659 pushl %edx
24660@@ -620,9 +708,6 @@ ignore_int:
24661 movl $(__KERNEL_DS),%eax
24662 movl %eax,%ds
24663 movl %eax,%es
24664- cmpl $2,early_recursion_flag
24665- je hlt_loop
24666- incl early_recursion_flag
24667 pushl 16(%esp)
24668 pushl 24(%esp)
24669 pushl 32(%esp)
24670@@ -656,29 +741,34 @@ ENTRY(setup_once_ref)
24671 /*
24672 * BSS section
24673 */
24674-__PAGE_ALIGNED_BSS
24675- .align PAGE_SIZE
24676 #ifdef CONFIG_X86_PAE
24677+.section .initial_pg_pmd,"a",@progbits
24678 initial_pg_pmd:
24679 .fill 1024*KPMDS,4,0
24680 #else
24681+.section .initial_page_table,"a",@progbits
24682 ENTRY(initial_page_table)
24683 .fill 1024,4,0
24684 #endif
24685+.section .initial_pg_fixmap,"a",@progbits
24686 initial_pg_fixmap:
24687 .fill 1024,4,0
24688+.section .empty_zero_page,"a",@progbits
24689 ENTRY(empty_zero_page)
24690 .fill 4096,1,0
24691+.section .swapper_pg_dir,"a",@progbits
24692 ENTRY(swapper_pg_dir)
24693+#ifdef CONFIG_X86_PAE
24694+ .fill 4,8,0
24695+#else
24696 .fill 1024,4,0
24697+#endif
24698
24699 /*
24700 * This starts the data section.
24701 */
24702 #ifdef CONFIG_X86_PAE
24703-__PAGE_ALIGNED_DATA
24704- /* Page-aligned for the benefit of paravirt? */
24705- .align PAGE_SIZE
24706+.section .initial_page_table,"a",@progbits
24707 ENTRY(initial_page_table)
24708 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24709 # if KPMDS == 3
24710@@ -697,12 +787,20 @@ ENTRY(initial_page_table)
24711 # error "Kernel PMDs should be 1, 2 or 3"
24712 # endif
24713 .align PAGE_SIZE /* needs to be page-sized too */
24714+
24715+#ifdef CONFIG_PAX_PER_CPU_PGD
24716+ENTRY(cpu_pgd)
24717+ .rept 2*NR_CPUS
24718+ .fill 4,8,0
24719+ .endr
24720+#endif
24721+
24722 #endif
24723
24724 .data
24725 .balign 4
24726 ENTRY(stack_start)
24727- .long init_thread_union+THREAD_SIZE
24728+ .long init_thread_union+THREAD_SIZE-8
24729
24730 __INITRODATA
24731 int_msg:
24732@@ -730,7 +828,7 @@ fault_msg:
24733 * segment size, and 32-bit linear address value:
24734 */
24735
24736- .data
24737+.section .rodata,"a",@progbits
24738 .globl boot_gdt_descr
24739 .globl idt_descr
24740
24741@@ -739,7 +837,7 @@ fault_msg:
24742 .word 0 # 32 bit align gdt_desc.address
24743 boot_gdt_descr:
24744 .word __BOOT_DS+7
24745- .long boot_gdt - __PAGE_OFFSET
24746+ .long pa(boot_gdt)
24747
24748 .word 0 # 32-bit align idt_desc.address
24749 idt_descr:
24750@@ -750,7 +848,7 @@ idt_descr:
24751 .word 0 # 32 bit align gdt_desc.address
24752 ENTRY(early_gdt_descr)
24753 .word GDT_ENTRIES*8-1
24754- .long gdt_page /* Overwritten for secondary CPUs */
24755+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
24756
24757 /*
24758 * The boot_gdt must mirror the equivalent in setup.S and is
24759@@ -759,5 +857,65 @@ ENTRY(early_gdt_descr)
24760 .align L1_CACHE_BYTES
24761 ENTRY(boot_gdt)
24762 .fill GDT_ENTRY_BOOT_CS,8,0
24763- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24764- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24765+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24766+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24767+
24768+ .align PAGE_SIZE_asm
24769+ENTRY(cpu_gdt_table)
24770+ .rept NR_CPUS
24771+ .quad 0x0000000000000000 /* NULL descriptor */
24772+ .quad 0x0000000000000000 /* 0x0b reserved */
24773+ .quad 0x0000000000000000 /* 0x13 reserved */
24774+ .quad 0x0000000000000000 /* 0x1b reserved */
24775+
24776+#ifdef CONFIG_PAX_KERNEXEC
24777+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
24778+#else
24779+ .quad 0x0000000000000000 /* 0x20 unused */
24780+#endif
24781+
24782+ .quad 0x0000000000000000 /* 0x28 unused */
24783+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
24784+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
24785+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
24786+ .quad 0x0000000000000000 /* 0x4b reserved */
24787+ .quad 0x0000000000000000 /* 0x53 reserved */
24788+ .quad 0x0000000000000000 /* 0x5b reserved */
24789+
24790+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
24791+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
24792+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
24793+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
24794+
24795+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
24796+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
24797+
24798+ /*
24799+ * Segments used for calling PnP BIOS have byte granularity.
24800+ * The code segments and data segments have fixed 64k limits,
24801+ * the transfer segment sizes are set at run time.
24802+ */
24803+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
24804+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
24805+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
24806+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
24807+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
24808+
24809+ /*
24810+ * The APM segments have byte granularity and their bases
24811+ * are set at run time. All have 64k limits.
24812+ */
24813+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
24814+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
24815+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
24816+
24817+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
24818+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
24819+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
24820+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
24821+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
24822+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
24823+
24824+ /* Be sure this is zeroed to avoid false validations in Xen */
24825+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
24826+ .endr
24827diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
24828index f8a8406..ad6d014 100644
24829--- a/arch/x86/kernel/head_64.S
24830+++ b/arch/x86/kernel/head_64.S
24831@@ -20,6 +20,8 @@
24832 #include <asm/processor-flags.h>
24833 #include <asm/percpu.h>
24834 #include <asm/nops.h>
24835+#include <asm/cpufeature.h>
24836+#include <asm/alternative-asm.h>
24837
24838 #ifdef CONFIG_PARAVIRT
24839 #include <asm/asm-offsets.h>
24840@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
24841 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
24842 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
24843 L3_START_KERNEL = pud_index(__START_KERNEL_map)
24844+L4_VMALLOC_START = pgd_index(VMALLOC_START)
24845+L3_VMALLOC_START = pud_index(VMALLOC_START)
24846+L4_VMALLOC_END = pgd_index(VMALLOC_END)
24847+L3_VMALLOC_END = pud_index(VMALLOC_END)
24848+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
24849+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
24850
24851 .text
24852 __HEAD
24853@@ -89,11 +97,26 @@ startup_64:
24854 * Fixup the physical addresses in the page table
24855 */
24856 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
24857+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
24858+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
24859+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
24860+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
24861+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
24862
24863- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
24864- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
24865+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
24866+#ifndef CONFIG_XEN
24867+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
24868+#endif
24869
24870+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
24871+
24872+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
24873+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
24874+
24875+ addq %rbp, level2_fixmap_pgt + (504*8)(%rip)
24876+ addq %rbp, level2_fixmap_pgt + (505*8)(%rip)
24877 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
24878+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
24879
24880 /*
24881 * Set up the identity mapping for the switchover. These
24882@@ -174,11 +197,12 @@ ENTRY(secondary_startup_64)
24883 * after the boot processor executes this code.
24884 */
24885
24886+ orq $-1, %rbp
24887 movq $(init_level4_pgt - __START_KERNEL_map), %rax
24888 1:
24889
24890- /* Enable PAE mode and PGE */
24891- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
24892+ /* Enable PAE mode and PSE/PGE */
24893+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
24894 movq %rcx, %cr4
24895
24896 /* Setup early boot stage 4 level pagetables. */
24897@@ -199,10 +223,21 @@ ENTRY(secondary_startup_64)
24898 movl $MSR_EFER, %ecx
24899 rdmsr
24900 btsl $_EFER_SCE, %eax /* Enable System Call */
24901- btl $20,%edi /* No Execute supported? */
24902+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
24903 jnc 1f
24904 btsl $_EFER_NX, %eax
24905+ cmpq $-1, %rbp
24906+ je 1f
24907 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
24908+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
24909+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
24910+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
24911+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
24912+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*504(%rip)
24913+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*505(%rip)
24914+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
24915+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
24916+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
24917 1: wrmsr /* Make changes effective */
24918
24919 /* Setup cr0 */
24920@@ -282,6 +317,7 @@ ENTRY(secondary_startup_64)
24921 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
24922 * address given in m16:64.
24923 */
24924+ pax_set_fptr_mask
24925 movq initial_code(%rip),%rax
24926 pushq $0 # fake return address to stop unwinder
24927 pushq $__KERNEL_CS # set correct cs
24928@@ -313,7 +349,7 @@ ENDPROC(start_cpu0)
24929 .quad INIT_PER_CPU_VAR(irq_stack_union)
24930
24931 GLOBAL(stack_start)
24932- .quad init_thread_union+THREAD_SIZE-8
24933+ .quad init_thread_union+THREAD_SIZE-16
24934 .word 0
24935 __FINITDATA
24936
24937@@ -393,7 +429,7 @@ early_idt_handler_common:
24938 call dump_stack
24939 #ifdef CONFIG_KALLSYMS
24940 leaq early_idt_ripmsg(%rip),%rdi
24941- movq 40(%rsp),%rsi # %rip again
24942+ movq 88(%rsp),%rsi # %rip again
24943 call __print_symbol
24944 #endif
24945 #endif /* EARLY_PRINTK */
24946@@ -422,6 +458,7 @@ ENDPROC(early_idt_handler_common)
24947 early_recursion_flag:
24948 .long 0
24949
24950+ .section .rodata,"a",@progbits
24951 #ifdef CONFIG_EARLY_PRINTK
24952 early_idt_msg:
24953 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
24954@@ -449,29 +486,52 @@ NEXT_PAGE(early_level4_pgt)
24955 NEXT_PAGE(early_dynamic_pgts)
24956 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
24957
24958- .data
24959+ .section .rodata,"a",@progbits
24960
24961-#ifndef CONFIG_XEN
24962 NEXT_PAGE(init_level4_pgt)
24963- .fill 512,8,0
24964-#else
24965-NEXT_PAGE(init_level4_pgt)
24966- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24967 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
24968 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24969+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
24970+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
24971+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
24972+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
24973+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
24974+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
24975 .org init_level4_pgt + L4_START_KERNEL*8, 0
24976 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
24977 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
24978
24979+#ifdef CONFIG_PAX_PER_CPU_PGD
24980+NEXT_PAGE(cpu_pgd)
24981+ .rept 2*NR_CPUS
24982+ .fill 512,8,0
24983+ .endr
24984+#endif
24985+
24986 NEXT_PAGE(level3_ident_pgt)
24987 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
24988+#ifdef CONFIG_XEN
24989 .fill 511, 8, 0
24990+#else
24991+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
24992+ .fill 510,8,0
24993+#endif
24994+
24995+NEXT_PAGE(level3_vmalloc_start_pgt)
24996+ .fill 512,8,0
24997+
24998+NEXT_PAGE(level3_vmalloc_end_pgt)
24999+ .fill 512,8,0
25000+
25001+NEXT_PAGE(level3_vmemmap_pgt)
25002+ .fill L3_VMEMMAP_START,8,0
25003+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25004+
25005 NEXT_PAGE(level2_ident_pgt)
25006- /* Since I easily can, map the first 1G.
25007+ /* Since I easily can, map the first 2G.
25008 * Don't set NX because code runs from these pages.
25009 */
25010- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
25011-#endif
25012+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
25013
25014 NEXT_PAGE(level3_kernel_pgt)
25015 .fill L3_START_KERNEL,8,0
25016@@ -479,6 +539,9 @@ NEXT_PAGE(level3_kernel_pgt)
25017 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
25018 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25019
25020+NEXT_PAGE(level2_vmemmap_pgt)
25021+ .fill 512,8,0
25022+
25023 NEXT_PAGE(level2_kernel_pgt)
25024 /*
25025 * 512 MB kernel mapping. We spend a full page on this pagetable
25026@@ -494,23 +557,61 @@ NEXT_PAGE(level2_kernel_pgt)
25027 KERNEL_IMAGE_SIZE/PMD_SIZE)
25028
25029 NEXT_PAGE(level2_fixmap_pgt)
25030- .fill 506,8,0
25031- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25032- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
25033- .fill 5,8,0
25034+ .fill 504,8,0
25035+ .quad level1_fixmap_pgt - __START_KERNEL_map + 0 * PAGE_SIZE + _PAGE_TABLE
25036+ .quad level1_fixmap_pgt - __START_KERNEL_map + 1 * PAGE_SIZE + _PAGE_TABLE
25037+ .quad level1_fixmap_pgt - __START_KERNEL_map + 2 * PAGE_SIZE + _PAGE_TABLE
25038+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
25039+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
25040+ .fill 4,8,0
25041
25042 NEXT_PAGE(level1_fixmap_pgt)
25043+ .fill 3*512,8,0
25044+
25045+NEXT_PAGE(level1_vsyscall_pgt)
25046 .fill 512,8,0
25047
25048 #undef PMDS
25049
25050- .data
25051+ .align PAGE_SIZE
25052+ENTRY(cpu_gdt_table)
25053+ .rept NR_CPUS
25054+ .quad 0x0000000000000000 /* NULL descriptor */
25055+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
25056+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
25057+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
25058+ .quad 0x00cffb000000ffff /* __USER32_CS */
25059+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
25060+ .quad 0x00affb000000ffff /* __USER_CS */
25061+
25062+#ifdef CONFIG_PAX_KERNEXEC
25063+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
25064+#else
25065+ .quad 0x0 /* unused */
25066+#endif
25067+
25068+ .quad 0,0 /* TSS */
25069+ .quad 0,0 /* LDT */
25070+ .quad 0,0,0 /* three TLS descriptors */
25071+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
25072+ /* asm/segment.h:GDT_ENTRIES must match this */
25073+
25074+#ifdef CONFIG_PAX_MEMORY_UDEREF
25075+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
25076+#else
25077+ .quad 0x0 /* unused */
25078+#endif
25079+
25080+ /* zero the remaining page */
25081+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25082+ .endr
25083+
25084 .align 16
25085 .globl early_gdt_descr
25086 early_gdt_descr:
25087 .word GDT_ENTRIES*8-1
25088 early_gdt_descr_base:
25089- .quad INIT_PER_CPU_VAR(gdt_page)
25090+ .quad cpu_gdt_table
25091
25092 ENTRY(phys_base)
25093 /* This must match the first entry in level2_kernel_pgt */
25094@@ -534,8 +635,8 @@ NEXT_PAGE(kasan_zero_pud)
25095
25096
25097 #include "../../x86/xen/xen-head.S"
25098-
25099- __PAGE_ALIGNED_BSS
25100+
25101+ .section .rodata,"a",@progbits
25102 NEXT_PAGE(empty_zero_page)
25103 .skip PAGE_SIZE
25104
25105diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25106index 05fd74f..c3548b1 100644
25107--- a/arch/x86/kernel/i386_ksyms_32.c
25108+++ b/arch/x86/kernel/i386_ksyms_32.c
25109@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25110 EXPORT_SYMBOL(cmpxchg8b_emu);
25111 #endif
25112
25113+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25114+
25115 /* Networking helper routines. */
25116 EXPORT_SYMBOL(csum_partial_copy_generic);
25117+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25118+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25119
25120 EXPORT_SYMBOL(__get_user_1);
25121 EXPORT_SYMBOL(__get_user_2);
25122@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25123 EXPORT_SYMBOL(___preempt_schedule_context);
25124 #endif
25125 #endif
25126+
25127+#ifdef CONFIG_PAX_KERNEXEC
25128+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25129+#endif
25130+
25131+#ifdef CONFIG_PAX_PER_CPU_PGD
25132+EXPORT_SYMBOL(cpu_pgd);
25133+#endif
25134diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25135index f341d56..d9b527b 100644
25136--- a/arch/x86/kernel/i387.c
25137+++ b/arch/x86/kernel/i387.c
25138@@ -68,7 +68,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25139 static inline bool interrupted_user_mode(void)
25140 {
25141 struct pt_regs *regs = get_irq_regs();
25142- return regs && user_mode_vm(regs);
25143+ return regs && user_mode(regs);
25144 }
25145
25146 /*
25147diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25148index e7cc537..67d7372 100644
25149--- a/arch/x86/kernel/i8259.c
25150+++ b/arch/x86/kernel/i8259.c
25151@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25152 static void make_8259A_irq(unsigned int irq)
25153 {
25154 disable_irq_nosync(irq);
25155- io_apic_irqs &= ~(1<<irq);
25156+ io_apic_irqs &= ~(1UL<<irq);
25157 irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
25158 enable_irq(irq);
25159 }
25160@@ -208,7 +208,7 @@ spurious_8259A_irq:
25161 "spurious 8259A interrupt: IRQ%d.\n", irq);
25162 spurious_irq_mask |= irqmask;
25163 }
25164- atomic_inc(&irq_err_count);
25165+ atomic_inc_unchecked(&irq_err_count);
25166 /*
25167 * Theoretically we do not have to handle this IRQ,
25168 * but in Linux this does not cause problems and is
25169@@ -349,14 +349,16 @@ static void init_8259A(int auto_eoi)
25170 /* (slave's support for AEOI in flat mode is to be investigated) */
25171 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25172
25173+ pax_open_kernel();
25174 if (auto_eoi)
25175 /*
25176 * In AEOI mode we just have to mask the interrupt
25177 * when acking.
25178 */
25179- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25180+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25181 else
25182- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25183+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25184+ pax_close_kernel();
25185
25186 udelay(100); /* wait for 8259A to initialize */
25187
25188diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25189index a979b5b..1d6db75 100644
25190--- a/arch/x86/kernel/io_delay.c
25191+++ b/arch/x86/kernel/io_delay.c
25192@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25193 * Quirk table for systems that misbehave (lock up, etc.) if port
25194 * 0x80 is used:
25195 */
25196-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25197+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25198 {
25199 .callback = dmi_io_delay_0xed_port,
25200 .ident = "Compaq Presario V6000",
25201diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25202index 4ddaf66..49d5c18 100644
25203--- a/arch/x86/kernel/ioport.c
25204+++ b/arch/x86/kernel/ioport.c
25205@@ -6,6 +6,7 @@
25206 #include <linux/sched.h>
25207 #include <linux/kernel.h>
25208 #include <linux/capability.h>
25209+#include <linux/security.h>
25210 #include <linux/errno.h>
25211 #include <linux/types.h>
25212 #include <linux/ioport.h>
25213@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25214 return -EINVAL;
25215 if (turn_on && !capable(CAP_SYS_RAWIO))
25216 return -EPERM;
25217+#ifdef CONFIG_GRKERNSEC_IO
25218+ if (turn_on && grsec_disable_privio) {
25219+ gr_handle_ioperm();
25220+ return -ENODEV;
25221+ }
25222+#endif
25223
25224 /*
25225 * If it's the first ioperm() call in this thread's lifetime, set the
25226@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25227 * because the ->io_bitmap_max value must match the bitmap
25228 * contents:
25229 */
25230- tss = &per_cpu(init_tss, get_cpu());
25231+ tss = init_tss + get_cpu();
25232
25233 if (turn_on)
25234 bitmap_clear(t->io_bitmap_ptr, from, num);
25235@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25236 if (level > old) {
25237 if (!capable(CAP_SYS_RAWIO))
25238 return -EPERM;
25239+#ifdef CONFIG_GRKERNSEC_IO
25240+ if (grsec_disable_privio) {
25241+ gr_handle_iopl();
25242+ return -ENODEV;
25243+ }
25244+#endif
25245 }
25246 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25247 t->iopl = level << 12;
25248diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25249index 67b1cbe..6ad4cbc 100644
25250--- a/arch/x86/kernel/irq.c
25251+++ b/arch/x86/kernel/irq.c
25252@@ -22,7 +22,7 @@
25253 #define CREATE_TRACE_POINTS
25254 #include <asm/trace/irq_vectors.h>
25255
25256-atomic_t irq_err_count;
25257+atomic_unchecked_t irq_err_count;
25258
25259 /* Function pointer for generic interrupt vector handling */
25260 void (*x86_platform_ipi_callback)(void) = NULL;
25261@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25262 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25263 seq_puts(p, " Hypervisor callback interrupts\n");
25264 #endif
25265- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25266+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25267 #if defined(CONFIG_X86_IO_APIC)
25268- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25269+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25270 #endif
25271 return 0;
25272 }
25273@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25274
25275 u64 arch_irq_stat(void)
25276 {
25277- u64 sum = atomic_read(&irq_err_count);
25278+ u64 sum = atomic_read_unchecked(&irq_err_count);
25279 return sum;
25280 }
25281
25282diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25283index 28d28f5..e6cc9ae 100644
25284--- a/arch/x86/kernel/irq_32.c
25285+++ b/arch/x86/kernel/irq_32.c
25286@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25287
25288 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25289
25290+extern void gr_handle_kernel_exploit(void);
25291+
25292 int sysctl_panic_on_stackoverflow __read_mostly;
25293
25294 /* Debugging check for stack overflow: is there less than 1KB free? */
25295@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25296 __asm__ __volatile__("andl %%esp,%0" :
25297 "=r" (sp) : "0" (THREAD_SIZE - 1));
25298
25299- return sp < (sizeof(struct thread_info) + STACK_WARN);
25300+ return sp < STACK_WARN;
25301 }
25302
25303 static void print_stack_overflow(void)
25304 {
25305 printk(KERN_WARNING "low stack detected by irq handler\n");
25306 dump_stack();
25307+ gr_handle_kernel_exploit();
25308 if (sysctl_panic_on_stackoverflow)
25309 panic("low stack detected by irq handler - check messages\n");
25310 }
25311@@ -77,10 +80,9 @@ static inline void *current_stack(void)
25312 static inline int
25313 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25314 {
25315- struct irq_stack *curstk, *irqstk;
25316+ struct irq_stack *irqstk;
25317 u32 *isp, *prev_esp, arg1, arg2;
25318
25319- curstk = (struct irq_stack *) current_stack();
25320 irqstk = __this_cpu_read(hardirq_stack);
25321
25322 /*
25323@@ -89,15 +91,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25324 * handler) we can't do that and just have to keep using the
25325 * current stack (which is the irq stack already after all)
25326 */
25327- if (unlikely(curstk == irqstk))
25328+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
25329 return 0;
25330
25331- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
25332+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
25333
25334 /* Save the next esp at the bottom of the stack */
25335 prev_esp = (u32 *)irqstk;
25336 *prev_esp = current_stack_pointer();
25337
25338+#ifdef CONFIG_PAX_MEMORY_UDEREF
25339+ __set_fs(MAKE_MM_SEG(0));
25340+#endif
25341+
25342 if (unlikely(overflow))
25343 call_on_stack(print_stack_overflow, isp);
25344
25345@@ -108,6 +114,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25346 : "0" (irq), "1" (desc), "2" (isp),
25347 "D" (desc->handle_irq)
25348 : "memory", "cc", "ecx");
25349+
25350+#ifdef CONFIG_PAX_MEMORY_UDEREF
25351+ __set_fs(current_thread_info()->addr_limit);
25352+#endif
25353+
25354 return 1;
25355 }
25356
25357@@ -116,32 +127,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25358 */
25359 void irq_ctx_init(int cpu)
25360 {
25361- struct irq_stack *irqstk;
25362-
25363 if (per_cpu(hardirq_stack, cpu))
25364 return;
25365
25366- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25367- THREADINFO_GFP,
25368- THREAD_SIZE_ORDER));
25369- per_cpu(hardirq_stack, cpu) = irqstk;
25370-
25371- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25372- THREADINFO_GFP,
25373- THREAD_SIZE_ORDER));
25374- per_cpu(softirq_stack, cpu) = irqstk;
25375-
25376- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
25377- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
25378+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25379+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25380 }
25381
25382 void do_softirq_own_stack(void)
25383 {
25384- struct thread_info *curstk;
25385 struct irq_stack *irqstk;
25386 u32 *isp, *prev_esp;
25387
25388- curstk = current_stack();
25389 irqstk = __this_cpu_read(softirq_stack);
25390
25391 /* build the stack frame on the softirq stack */
25392@@ -151,7 +148,16 @@ void do_softirq_own_stack(void)
25393 prev_esp = (u32 *)irqstk;
25394 *prev_esp = current_stack_pointer();
25395
25396+#ifdef CONFIG_PAX_MEMORY_UDEREF
25397+ __set_fs(MAKE_MM_SEG(0));
25398+#endif
25399+
25400 call_on_stack(__do_softirq, isp);
25401+
25402+#ifdef CONFIG_PAX_MEMORY_UDEREF
25403+ __set_fs(current_thread_info()->addr_limit);
25404+#endif
25405+
25406 }
25407
25408 bool handle_irq(unsigned irq, struct pt_regs *regs)
25409@@ -165,7 +171,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
25410 if (unlikely(!desc))
25411 return false;
25412
25413- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25414+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25415 if (unlikely(overflow))
25416 print_stack_overflow();
25417 desc->handle_irq(irq, desc);
25418diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25419index e4b503d..824fce8 100644
25420--- a/arch/x86/kernel/irq_64.c
25421+++ b/arch/x86/kernel/irq_64.c
25422@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
25423 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
25424 EXPORT_PER_CPU_SYMBOL(irq_regs);
25425
25426+extern void gr_handle_kernel_exploit(void);
25427+
25428 int sysctl_panic_on_stackoverflow;
25429
25430 /*
25431@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25432 u64 estack_top, estack_bottom;
25433 u64 curbase = (u64)task_stack_page(current);
25434
25435- if (user_mode_vm(regs))
25436+ if (user_mode(regs))
25437 return;
25438
25439 if (regs->sp >= curbase + sizeof(struct thread_info) +
25440@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25441 irq_stack_top, irq_stack_bottom,
25442 estack_top, estack_bottom);
25443
25444+ gr_handle_kernel_exploit();
25445+
25446 if (sysctl_panic_on_stackoverflow)
25447 panic("low stack detected by irq handler - check messages\n");
25448 #endif
25449diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25450index 26d5a55..a01160a 100644
25451--- a/arch/x86/kernel/jump_label.c
25452+++ b/arch/x86/kernel/jump_label.c
25453@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25454 * Jump label is enabled for the first time.
25455 * So we expect a default_nop...
25456 */
25457- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25458+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25459 != 0))
25460 bug_at((void *)entry->code, __LINE__);
25461 } else {
25462@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25463 * ...otherwise expect an ideal_nop. Otherwise
25464 * something went horribly wrong.
25465 */
25466- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25467+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25468 != 0))
25469 bug_at((void *)entry->code, __LINE__);
25470 }
25471@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25472 * are converting the default nop to the ideal nop.
25473 */
25474 if (init) {
25475- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25476+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25477 bug_at((void *)entry->code, __LINE__);
25478 } else {
25479 code.jump = 0xe9;
25480 code.offset = entry->target -
25481 (entry->code + JUMP_LABEL_NOP_SIZE);
25482- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25483+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25484 bug_at((void *)entry->code, __LINE__);
25485 }
25486 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
25487diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25488index 25ecd56..e12482f 100644
25489--- a/arch/x86/kernel/kgdb.c
25490+++ b/arch/x86/kernel/kgdb.c
25491@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
25492 #ifdef CONFIG_X86_32
25493 switch (regno) {
25494 case GDB_SS:
25495- if (!user_mode_vm(regs))
25496+ if (!user_mode(regs))
25497 *(unsigned long *)mem = __KERNEL_DS;
25498 break;
25499 case GDB_SP:
25500- if (!user_mode_vm(regs))
25501+ if (!user_mode(regs))
25502 *(unsigned long *)mem = kernel_stack_pointer(regs);
25503 break;
25504 case GDB_GS:
25505@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
25506 bp->attr.bp_addr = breakinfo[breakno].addr;
25507 bp->attr.bp_len = breakinfo[breakno].len;
25508 bp->attr.bp_type = breakinfo[breakno].type;
25509- info->address = breakinfo[breakno].addr;
25510+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25511+ info->address = ktla_ktva(breakinfo[breakno].addr);
25512+ else
25513+ info->address = breakinfo[breakno].addr;
25514 info->len = breakinfo[breakno].len;
25515 info->type = breakinfo[breakno].type;
25516 val = arch_install_hw_breakpoint(bp);
25517@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25518 case 'k':
25519 /* clear the trace bit */
25520 linux_regs->flags &= ~X86_EFLAGS_TF;
25521- atomic_set(&kgdb_cpu_doing_single_step, -1);
25522+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25523
25524 /* set the trace bit if we're stepping */
25525 if (remcomInBuffer[0] == 's') {
25526 linux_regs->flags |= X86_EFLAGS_TF;
25527- atomic_set(&kgdb_cpu_doing_single_step,
25528+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25529 raw_smp_processor_id());
25530 }
25531
25532@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25533
25534 switch (cmd) {
25535 case DIE_DEBUG:
25536- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25537+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25538 if (user_mode(regs))
25539 return single_step_cont(regs, args);
25540 break;
25541@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25542 #endif /* CONFIG_DEBUG_RODATA */
25543
25544 bpt->type = BP_BREAKPOINT;
25545- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25546+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25547 BREAK_INSTR_SIZE);
25548 if (err)
25549 return err;
25550- err = probe_kernel_write((char *)bpt->bpt_addr,
25551+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25552 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25553 #ifdef CONFIG_DEBUG_RODATA
25554 if (!err)
25555@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25556 return -EBUSY;
25557 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25558 BREAK_INSTR_SIZE);
25559- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25560+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25561 if (err)
25562 return err;
25563 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25564@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25565 if (mutex_is_locked(&text_mutex))
25566 goto knl_write;
25567 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25568- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25569+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25570 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25571 goto knl_write;
25572 return err;
25573 knl_write:
25574 #endif /* CONFIG_DEBUG_RODATA */
25575- return probe_kernel_write((char *)bpt->bpt_addr,
25576+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25577 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25578 }
25579
25580diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25581index 03189d8..4705700 100644
25582--- a/arch/x86/kernel/kprobes/core.c
25583+++ b/arch/x86/kernel/kprobes/core.c
25584@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
25585 s32 raddr;
25586 } __packed *insn;
25587
25588- insn = (struct __arch_relative_insn *)from;
25589+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
25590+
25591+ pax_open_kernel();
25592 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25593 insn->op = op;
25594+ pax_close_kernel();
25595 }
25596
25597 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25598@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
25599 kprobe_opcode_t opcode;
25600 kprobe_opcode_t *orig_opcodes = opcodes;
25601
25602- if (search_exception_tables((unsigned long)opcodes))
25603+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25604 return 0; /* Page fault may occur on this address. */
25605
25606 retry:
25607@@ -260,12 +263,12 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25608 * Fortunately, we know that the original code is the ideal 5-byte
25609 * long NOP.
25610 */
25611- memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25612+ memcpy(buf, (void *)ktla_ktva(addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25613 if (faddr)
25614 memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
25615 else
25616 buf[0] = kp->opcode;
25617- return (unsigned long)buf;
25618+ return ktva_ktla((unsigned long)buf);
25619 }
25620
25621 /*
25622@@ -367,7 +370,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25623 /* Another subsystem puts a breakpoint, failed to recover */
25624 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25625 return 0;
25626+ pax_open_kernel();
25627 memcpy(dest, insn.kaddr, length);
25628+ pax_close_kernel();
25629
25630 #ifdef CONFIG_X86_64
25631 if (insn_rip_relative(&insn)) {
25632@@ -394,7 +399,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25633 return 0;
25634 }
25635 disp = (u8 *) dest + insn_offset_displacement(&insn);
25636+ pax_open_kernel();
25637 *(s32 *) disp = (s32) newdisp;
25638+ pax_close_kernel();
25639 }
25640 #endif
25641 return length;
25642@@ -536,7 +543,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25643 * nor set current_kprobe, because it doesn't use single
25644 * stepping.
25645 */
25646- regs->ip = (unsigned long)p->ainsn.insn;
25647+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25648 preempt_enable_no_resched();
25649 return;
25650 }
25651@@ -553,9 +560,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25652 regs->flags &= ~X86_EFLAGS_IF;
25653 /* single step inline if the instruction is an int3 */
25654 if (p->opcode == BREAKPOINT_INSTRUCTION)
25655- regs->ip = (unsigned long)p->addr;
25656+ regs->ip = ktla_ktva((unsigned long)p->addr);
25657 else
25658- regs->ip = (unsigned long)p->ainsn.insn;
25659+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25660 }
25661 NOKPROBE_SYMBOL(setup_singlestep);
25662
25663@@ -605,7 +612,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25664 struct kprobe *p;
25665 struct kprobe_ctlblk *kcb;
25666
25667- if (user_mode_vm(regs))
25668+ if (user_mode(regs))
25669 return 0;
25670
25671 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
25672@@ -640,7 +647,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25673 setup_singlestep(p, regs, kcb, 0);
25674 return 1;
25675 }
25676- } else if (*addr != BREAKPOINT_INSTRUCTION) {
25677+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25678 /*
25679 * The breakpoint instruction was removed right
25680 * after we hit it. Another cpu has removed
25681@@ -687,6 +694,9 @@ static void __used kretprobe_trampoline_holder(void)
25682 " movq %rax, 152(%rsp)\n"
25683 RESTORE_REGS_STRING
25684 " popfq\n"
25685+#ifdef KERNEXEC_PLUGIN
25686+ " btsq $63,(%rsp)\n"
25687+#endif
25688 #else
25689 " pushf\n"
25690 SAVE_REGS_STRING
25691@@ -827,7 +837,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
25692 struct kprobe_ctlblk *kcb)
25693 {
25694 unsigned long *tos = stack_addr(regs);
25695- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25696+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25697 unsigned long orig_ip = (unsigned long)p->addr;
25698 kprobe_opcode_t *insn = p->ainsn.insn;
25699
25700@@ -1010,7 +1020,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
25701 struct die_args *args = data;
25702 int ret = NOTIFY_DONE;
25703
25704- if (args->regs && user_mode_vm(args->regs))
25705+ if (args->regs && user_mode(args->regs))
25706 return ret;
25707
25708 if (val == DIE_GPF) {
25709diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25710index 7b3b9d1..e2478b91 100644
25711--- a/arch/x86/kernel/kprobes/opt.c
25712+++ b/arch/x86/kernel/kprobes/opt.c
25713@@ -79,6 +79,7 @@ found:
25714 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25715 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25716 {
25717+ pax_open_kernel();
25718 #ifdef CONFIG_X86_64
25719 *addr++ = 0x48;
25720 *addr++ = 0xbf;
25721@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25722 *addr++ = 0xb8;
25723 #endif
25724 *(unsigned long *)addr = val;
25725+ pax_close_kernel();
25726 }
25727
25728 asm (
25729@@ -342,7 +344,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
25730 * Verify if the address gap is in 2GB range, because this uses
25731 * a relative jump.
25732 */
25733- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25734+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25735 if (abs(rel) > 0x7fffffff) {
25736 __arch_remove_optimized_kprobe(op, 0);
25737 return -ERANGE;
25738@@ -359,16 +361,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
25739 op->optinsn.size = ret;
25740
25741 /* Copy arch-dep-instance from template */
25742- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25743+ pax_open_kernel();
25744+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25745+ pax_close_kernel();
25746
25747 /* Set probe information */
25748 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25749
25750 /* Set probe function call */
25751- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25752+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25753
25754 /* Set returning jmp instruction at the tail of out-of-line buffer */
25755- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25756+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25757 (u8 *)op->kp.addr + op->optinsn.size);
25758
25759 flush_icache_range((unsigned long) buf,
25760@@ -393,7 +397,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
25761 WARN_ON(kprobe_disabled(&op->kp));
25762
25763 /* Backup instructions which will be replaced by jump address */
25764- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25765+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25766 RELATIVE_ADDR_SIZE);
25767
25768 insn_buf[0] = RELATIVEJUMP_OPCODE;
25769@@ -441,7 +445,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25770 /* This kprobe is really able to run optimized path. */
25771 op = container_of(p, struct optimized_kprobe, kp);
25772 /* Detour through copied instructions */
25773- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25774+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25775 if (!reenter)
25776 reset_current_kprobe();
25777 preempt_enable_no_resched();
25778diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
25779index c2bedae..25e7ab60 100644
25780--- a/arch/x86/kernel/ksysfs.c
25781+++ b/arch/x86/kernel/ksysfs.c
25782@@ -184,7 +184,7 @@ out:
25783
25784 static struct kobj_attribute type_attr = __ATTR_RO(type);
25785
25786-static struct bin_attribute data_attr = {
25787+static bin_attribute_no_const data_attr __read_only = {
25788 .attr = {
25789 .name = "data",
25790 .mode = S_IRUGO,
25791diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25792index c37886d..3f425e3 100644
25793--- a/arch/x86/kernel/ldt.c
25794+++ b/arch/x86/kernel/ldt.c
25795@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25796 if (reload) {
25797 #ifdef CONFIG_SMP
25798 preempt_disable();
25799- load_LDT(pc);
25800+ load_LDT_nolock(pc);
25801 if (!cpumask_equal(mm_cpumask(current->mm),
25802 cpumask_of(smp_processor_id())))
25803 smp_call_function(flush_ldt, current->mm, 1);
25804 preempt_enable();
25805 #else
25806- load_LDT(pc);
25807+ load_LDT_nolock(pc);
25808 #endif
25809 }
25810 if (oldsize) {
25811@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
25812 return err;
25813
25814 for (i = 0; i < old->size; i++)
25815- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
25816+ write_ldt_entry(new->ldt, i, old->ldt + i);
25817 return 0;
25818 }
25819
25820@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
25821 retval = copy_ldt(&mm->context, &old_mm->context);
25822 mutex_unlock(&old_mm->context.lock);
25823 }
25824+
25825+ if (tsk == current) {
25826+ mm->context.vdso = 0;
25827+
25828+#ifdef CONFIG_X86_32
25829+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25830+ mm->context.user_cs_base = 0UL;
25831+ mm->context.user_cs_limit = ~0UL;
25832+
25833+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
25834+ cpumask_clear(&mm->context.cpu_user_cs_mask);
25835+#endif
25836+
25837+#endif
25838+#endif
25839+
25840+ }
25841+
25842 return retval;
25843 }
25844
25845@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
25846 }
25847 }
25848
25849+#ifdef CONFIG_PAX_SEGMEXEC
25850+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
25851+ error = -EINVAL;
25852+ goto out_unlock;
25853+ }
25854+#endif
25855+
25856 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
25857 error = -EINVAL;
25858 goto out_unlock;
25859diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
25860index ff3c3101d..d7c0cd8 100644
25861--- a/arch/x86/kernel/livepatch.c
25862+++ b/arch/x86/kernel/livepatch.c
25863@@ -41,9 +41,10 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
25864 int ret, numpages, size = 4;
25865 bool readonly;
25866 unsigned long val;
25867- unsigned long core = (unsigned long)mod->module_core;
25868- unsigned long core_ro_size = mod->core_ro_size;
25869- unsigned long core_size = mod->core_size;
25870+ unsigned long core_rx = (unsigned long)mod->module_core_rx;
25871+ unsigned long core_rw = (unsigned long)mod->module_core_rw;
25872+ unsigned long core_size_rx = mod->core_size_rx;
25873+ unsigned long core_size_rw = mod->core_size_rw;
25874
25875 switch (type) {
25876 case R_X86_64_NONE:
25877@@ -66,11 +67,12 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
25878 return -EINVAL;
25879 }
25880
25881- if (loc < core || loc >= core + core_size)
25882+ if ((loc < core_rx || loc >= core_rx + core_size_rx) &&
25883+ (loc < core_rw || loc >= core_rw + core_size_rw))
25884 /* loc does not point to any symbol inside the module */
25885 return -EINVAL;
25886
25887- if (loc < core + core_ro_size)
25888+ if (loc < core_rx + core_size_rx)
25889 readonly = true;
25890 else
25891 readonly = false;
25892diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
25893index 469b23d..5449cfe 100644
25894--- a/arch/x86/kernel/machine_kexec_32.c
25895+++ b/arch/x86/kernel/machine_kexec_32.c
25896@@ -26,7 +26,7 @@
25897 #include <asm/cacheflush.h>
25898 #include <asm/debugreg.h>
25899
25900-static void set_idt(void *newidt, __u16 limit)
25901+static void set_idt(struct desc_struct *newidt, __u16 limit)
25902 {
25903 struct desc_ptr curidt;
25904
25905@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
25906 }
25907
25908
25909-static void set_gdt(void *newgdt, __u16 limit)
25910+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
25911 {
25912 struct desc_ptr curgdt;
25913
25914@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
25915 }
25916
25917 control_page = page_address(image->control_code_page);
25918- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
25919+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
25920
25921 relocate_kernel_ptr = control_page;
25922 page_list[PA_CONTROL_PAGE] = __pa(control_page);
25923diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
25924index 94ea120..4154cea 100644
25925--- a/arch/x86/kernel/mcount_64.S
25926+++ b/arch/x86/kernel/mcount_64.S
25927@@ -7,7 +7,7 @@
25928 #include <linux/linkage.h>
25929 #include <asm/ptrace.h>
25930 #include <asm/ftrace.h>
25931-
25932+#include <asm/alternative-asm.h>
25933
25934 .code64
25935 .section .entry.text, "ax"
25936@@ -148,8 +148,9 @@
25937 #ifdef CONFIG_DYNAMIC_FTRACE
25938
25939 ENTRY(function_hook)
25940+ pax_force_retaddr
25941 retq
25942-END(function_hook)
25943+ENDPROC(function_hook)
25944
25945 ENTRY(ftrace_caller)
25946 /* save_mcount_regs fills in first two parameters */
25947@@ -181,8 +182,9 @@ GLOBAL(ftrace_graph_call)
25948 #endif
25949
25950 GLOBAL(ftrace_stub)
25951+ pax_force_retaddr
25952 retq
25953-END(ftrace_caller)
25954+ENDPROC(ftrace_caller)
25955
25956 ENTRY(ftrace_regs_caller)
25957 /* Save the current flags before any operations that can change them */
25958@@ -253,7 +255,7 @@ GLOBAL(ftrace_regs_caller_end)
25959
25960 jmp ftrace_return
25961
25962-END(ftrace_regs_caller)
25963+ENDPROC(ftrace_regs_caller)
25964
25965
25966 #else /* ! CONFIG_DYNAMIC_FTRACE */
25967@@ -272,18 +274,20 @@ fgraph_trace:
25968 #endif
25969
25970 GLOBAL(ftrace_stub)
25971+ pax_force_retaddr
25972 retq
25973
25974 trace:
25975 /* save_mcount_regs fills in first two parameters */
25976 save_mcount_regs
25977
25978+ pax_force_fptr ftrace_trace_function
25979 call *ftrace_trace_function
25980
25981 restore_mcount_regs
25982
25983 jmp fgraph_trace
25984-END(function_hook)
25985+ENDPROC(function_hook)
25986 #endif /* CONFIG_DYNAMIC_FTRACE */
25987 #endif /* CONFIG_FUNCTION_TRACER */
25988
25989@@ -305,8 +309,9 @@ ENTRY(ftrace_graph_caller)
25990
25991 restore_mcount_regs
25992
25993+ pax_force_retaddr
25994 retq
25995-END(ftrace_graph_caller)
25996+ENDPROC(ftrace_graph_caller)
25997
25998 GLOBAL(return_to_handler)
25999 subq $24, %rsp
26000@@ -322,5 +327,7 @@ GLOBAL(return_to_handler)
26001 movq 8(%rsp), %rdx
26002 movq (%rsp), %rax
26003 addq $24, %rsp
26004+ pax_force_fptr %rdi
26005 jmp *%rdi
26006+ENDPROC(return_to_handler)
26007 #endif
26008diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
26009index d1ac80b..f593701 100644
26010--- a/arch/x86/kernel/module.c
26011+++ b/arch/x86/kernel/module.c
26012@@ -82,17 +82,17 @@ static unsigned long int get_module_load_offset(void)
26013 }
26014 #endif
26015
26016-void *module_alloc(unsigned long size)
26017+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
26018 {
26019 void *p;
26020
26021- if (PAGE_ALIGN(size) > MODULES_LEN)
26022+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
26023 return NULL;
26024
26025 p = __vmalloc_node_range(size, MODULE_ALIGN,
26026 MODULES_VADDR + get_module_load_offset(),
26027- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
26028- PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
26029+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
26030+ prot, 0, NUMA_NO_NODE,
26031 __builtin_return_address(0));
26032 if (p && (kasan_module_alloc(p, size) < 0)) {
26033 vfree(p);
26034@@ -102,6 +102,51 @@ void *module_alloc(unsigned long size)
26035 return p;
26036 }
26037
26038+void *module_alloc(unsigned long size)
26039+{
26040+
26041+#ifdef CONFIG_PAX_KERNEXEC
26042+ return __module_alloc(size, PAGE_KERNEL);
26043+#else
26044+ return __module_alloc(size, PAGE_KERNEL_EXEC);
26045+#endif
26046+
26047+}
26048+
26049+#ifdef CONFIG_PAX_KERNEXEC
26050+#ifdef CONFIG_X86_32
26051+void *module_alloc_exec(unsigned long size)
26052+{
26053+ struct vm_struct *area;
26054+
26055+ if (size == 0)
26056+ return NULL;
26057+
26058+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
26059+return area ? area->addr : NULL;
26060+}
26061+EXPORT_SYMBOL(module_alloc_exec);
26062+
26063+void module_memfree_exec(void *module_region)
26064+{
26065+ vunmap(module_region);
26066+}
26067+EXPORT_SYMBOL(module_memfree_exec);
26068+#else
26069+void module_memfree_exec(void *module_region)
26070+{
26071+ module_memfree(module_region);
26072+}
26073+EXPORT_SYMBOL(module_memfree_exec);
26074+
26075+void *module_alloc_exec(unsigned long size)
26076+{
26077+ return __module_alloc(size, PAGE_KERNEL_RX);
26078+}
26079+EXPORT_SYMBOL(module_alloc_exec);
26080+#endif
26081+#endif
26082+
26083 #ifdef CONFIG_X86_32
26084 int apply_relocate(Elf32_Shdr *sechdrs,
26085 const char *strtab,
26086@@ -112,14 +157,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26087 unsigned int i;
26088 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26089 Elf32_Sym *sym;
26090- uint32_t *location;
26091+ uint32_t *plocation, location;
26092
26093 DEBUGP("Applying relocate section %u to %u\n",
26094 relsec, sechdrs[relsec].sh_info);
26095 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26096 /* This is where to make the change */
26097- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26098- + rel[i].r_offset;
26099+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26100+ location = (uint32_t)plocation;
26101+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26102+ plocation = ktla_ktva((void *)plocation);
26103 /* This is the symbol it is referring to. Note that all
26104 undefined symbols have been resolved. */
26105 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26106@@ -128,11 +175,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26107 switch (ELF32_R_TYPE(rel[i].r_info)) {
26108 case R_386_32:
26109 /* We add the value into the location given */
26110- *location += sym->st_value;
26111+ pax_open_kernel();
26112+ *plocation += sym->st_value;
26113+ pax_close_kernel();
26114 break;
26115 case R_386_PC32:
26116 /* Add the value, subtract its position */
26117- *location += sym->st_value - (uint32_t)location;
26118+ pax_open_kernel();
26119+ *plocation += sym->st_value - location;
26120+ pax_close_kernel();
26121 break;
26122 default:
26123 pr_err("%s: Unknown relocation: %u\n",
26124@@ -177,21 +228,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26125 case R_X86_64_NONE:
26126 break;
26127 case R_X86_64_64:
26128+ pax_open_kernel();
26129 *(u64 *)loc = val;
26130+ pax_close_kernel();
26131 break;
26132 case R_X86_64_32:
26133+ pax_open_kernel();
26134 *(u32 *)loc = val;
26135+ pax_close_kernel();
26136 if (val != *(u32 *)loc)
26137 goto overflow;
26138 break;
26139 case R_X86_64_32S:
26140+ pax_open_kernel();
26141 *(s32 *)loc = val;
26142+ pax_close_kernel();
26143 if ((s64)val != *(s32 *)loc)
26144 goto overflow;
26145 break;
26146 case R_X86_64_PC32:
26147 val -= (u64)loc;
26148+ pax_open_kernel();
26149 *(u32 *)loc = val;
26150+ pax_close_kernel();
26151+
26152 #if 0
26153 if ((s64)val != *(s32 *)loc)
26154 goto overflow;
26155diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26156index 113e707..0a690e1 100644
26157--- a/arch/x86/kernel/msr.c
26158+++ b/arch/x86/kernel/msr.c
26159@@ -39,6 +39,7 @@
26160 #include <linux/notifier.h>
26161 #include <linux/uaccess.h>
26162 #include <linux/gfp.h>
26163+#include <linux/grsecurity.h>
26164
26165 #include <asm/processor.h>
26166 #include <asm/msr.h>
26167@@ -105,6 +106,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26168 int err = 0;
26169 ssize_t bytes = 0;
26170
26171+#ifdef CONFIG_GRKERNSEC_KMEM
26172+ gr_handle_msr_write();
26173+ return -EPERM;
26174+#endif
26175+
26176 if (count % 8)
26177 return -EINVAL; /* Invalid chunk size */
26178
26179@@ -152,6 +158,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26180 err = -EBADF;
26181 break;
26182 }
26183+#ifdef CONFIG_GRKERNSEC_KMEM
26184+ gr_handle_msr_write();
26185+ return -EPERM;
26186+#endif
26187 if (copy_from_user(&regs, uregs, sizeof regs)) {
26188 err = -EFAULT;
26189 break;
26190@@ -235,7 +245,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26191 return notifier_from_errno(err);
26192 }
26193
26194-static struct notifier_block __refdata msr_class_cpu_notifier = {
26195+static struct notifier_block msr_class_cpu_notifier = {
26196 .notifier_call = msr_class_cpu_callback,
26197 };
26198
26199diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26200index c3e985d..110a36a 100644
26201--- a/arch/x86/kernel/nmi.c
26202+++ b/arch/x86/kernel/nmi.c
26203@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26204
26205 static void nmi_max_handler(struct irq_work *w)
26206 {
26207- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26208+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26209 int remainder_ns, decimal_msecs;
26210- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26211+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26212
26213 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26214 decimal_msecs = remainder_ns / 1000;
26215
26216 printk_ratelimited(KERN_INFO
26217 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26218- a->handler, whole_msecs, decimal_msecs);
26219+ n->action->handler, whole_msecs, decimal_msecs);
26220 }
26221
26222 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26223@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26224 delta = sched_clock() - delta;
26225 trace_nmi_handler(a->handler, (int)delta, thishandled);
26226
26227- if (delta < nmi_longest_ns || delta < a->max_duration)
26228+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26229 continue;
26230
26231- a->max_duration = delta;
26232- irq_work_queue(&a->irq_work);
26233+ a->work->max_duration = delta;
26234+ irq_work_queue(&a->work->irq_work);
26235 }
26236
26237 rcu_read_unlock();
26238@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26239 }
26240 NOKPROBE_SYMBOL(nmi_handle);
26241
26242-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26243+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26244 {
26245 struct nmi_desc *desc = nmi_to_desc(type);
26246 unsigned long flags;
26247@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26248 if (!action->handler)
26249 return -EINVAL;
26250
26251- init_irq_work(&action->irq_work, nmi_max_handler);
26252+ action->work->action = action;
26253+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26254
26255 spin_lock_irqsave(&desc->lock, flags);
26256
26257@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26258 * event confuses some handlers (kdump uses this flag)
26259 */
26260 if (action->flags & NMI_FLAG_FIRST)
26261- list_add_rcu(&action->list, &desc->head);
26262+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26263 else
26264- list_add_tail_rcu(&action->list, &desc->head);
26265+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26266
26267 spin_unlock_irqrestore(&desc->lock, flags);
26268 return 0;
26269@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26270 if (!strcmp(n->name, name)) {
26271 WARN(in_nmi(),
26272 "Trying to free NMI (%s) from NMI context!\n", n->name);
26273- list_del_rcu(&n->list);
26274+ pax_list_del_rcu((struct list_head *)&n->list);
26275 break;
26276 }
26277 }
26278@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26279 dotraplinkage notrace void
26280 do_nmi(struct pt_regs *regs, long error_code)
26281 {
26282+
26283+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26284+ if (!user_mode(regs)) {
26285+ unsigned long cs = regs->cs & 0xFFFF;
26286+ unsigned long ip = ktva_ktla(regs->ip);
26287+
26288+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26289+ regs->ip = ip;
26290+ }
26291+#endif
26292+
26293 nmi_nesting_preprocess(regs);
26294
26295 nmi_enter();
26296diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26297index 6d9582e..f746287 100644
26298--- a/arch/x86/kernel/nmi_selftest.c
26299+++ b/arch/x86/kernel/nmi_selftest.c
26300@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26301 {
26302 /* trap all the unknown NMIs we may generate */
26303 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26304- __initdata);
26305+ __initconst);
26306 }
26307
26308 static void __init cleanup_nmi_testsuite(void)
26309@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26310 unsigned long timeout;
26311
26312 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26313- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26314+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26315 nmi_fail = FAILURE;
26316 return;
26317 }
26318diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26319index bbb6c73..24a58ef 100644
26320--- a/arch/x86/kernel/paravirt-spinlocks.c
26321+++ b/arch/x86/kernel/paravirt-spinlocks.c
26322@@ -8,7 +8,7 @@
26323
26324 #include <asm/paravirt.h>
26325
26326-struct pv_lock_ops pv_lock_ops = {
26327+struct pv_lock_ops pv_lock_ops __read_only = {
26328 #ifdef CONFIG_SMP
26329 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26330 .unlock_kick = paravirt_nop,
26331diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26332index 548d25f..f8fb99c 100644
26333--- a/arch/x86/kernel/paravirt.c
26334+++ b/arch/x86/kernel/paravirt.c
26335@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26336 {
26337 return x;
26338 }
26339+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26340+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26341+#endif
26342
26343 void __init default_banner(void)
26344 {
26345@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26346
26347 if (opfunc == NULL)
26348 /* If there's no function, patch it with a ud2a (BUG) */
26349- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26350- else if (opfunc == _paravirt_nop)
26351+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26352+ else if (opfunc == (void *)_paravirt_nop)
26353 /* If the operation is a nop, then nop the callsite */
26354 ret = paravirt_patch_nop();
26355
26356 /* identity functions just return their single argument */
26357- else if (opfunc == _paravirt_ident_32)
26358+ else if (opfunc == (void *)_paravirt_ident_32)
26359 ret = paravirt_patch_ident_32(insnbuf, len);
26360- else if (opfunc == _paravirt_ident_64)
26361+ else if (opfunc == (void *)_paravirt_ident_64)
26362 ret = paravirt_patch_ident_64(insnbuf, len);
26363+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26364+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26365+ ret = paravirt_patch_ident_64(insnbuf, len);
26366+#endif
26367
26368 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26369 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26370@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26371 if (insn_len > len || start == NULL)
26372 insn_len = len;
26373 else
26374- memcpy(insnbuf, start, insn_len);
26375+ memcpy(insnbuf, ktla_ktva(start), insn_len);
26376
26377 return insn_len;
26378 }
26379@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26380 return this_cpu_read(paravirt_lazy_mode);
26381 }
26382
26383-struct pv_info pv_info = {
26384+struct pv_info pv_info __read_only = {
26385 .name = "bare hardware",
26386 .paravirt_enabled = 0,
26387 .kernel_rpl = 0,
26388@@ -311,16 +318,16 @@ struct pv_info pv_info = {
26389 #endif
26390 };
26391
26392-struct pv_init_ops pv_init_ops = {
26393+struct pv_init_ops pv_init_ops __read_only = {
26394 .patch = native_patch,
26395 };
26396
26397-struct pv_time_ops pv_time_ops = {
26398+struct pv_time_ops pv_time_ops __read_only = {
26399 .sched_clock = native_sched_clock,
26400 .steal_clock = native_steal_clock,
26401 };
26402
26403-__visible struct pv_irq_ops pv_irq_ops = {
26404+__visible struct pv_irq_ops pv_irq_ops __read_only = {
26405 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
26406 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
26407 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
26408@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
26409 #endif
26410 };
26411
26412-__visible struct pv_cpu_ops pv_cpu_ops = {
26413+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
26414 .cpuid = native_cpuid,
26415 .get_debugreg = native_get_debugreg,
26416 .set_debugreg = native_set_debugreg,
26417@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
26418 NOKPROBE_SYMBOL(native_set_debugreg);
26419 NOKPROBE_SYMBOL(native_load_idt);
26420
26421-struct pv_apic_ops pv_apic_ops = {
26422+struct pv_apic_ops pv_apic_ops __read_only= {
26423 #ifdef CONFIG_X86_LOCAL_APIC
26424 .startup_ipi_hook = paravirt_nop,
26425 #endif
26426 };
26427
26428-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
26429+#ifdef CONFIG_X86_32
26430+#ifdef CONFIG_X86_PAE
26431+/* 64-bit pagetable entries */
26432+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
26433+#else
26434 /* 32-bit pagetable entries */
26435 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
26436+#endif
26437 #else
26438 /* 64-bit pagetable entries */
26439 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
26440 #endif
26441
26442-struct pv_mmu_ops pv_mmu_ops = {
26443+struct pv_mmu_ops pv_mmu_ops __read_only = {
26444
26445 .read_cr2 = native_read_cr2,
26446 .write_cr2 = native_write_cr2,
26447@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
26448 .make_pud = PTE_IDENT,
26449
26450 .set_pgd = native_set_pgd,
26451+ .set_pgd_batched = native_set_pgd_batched,
26452 #endif
26453 #endif /* PAGETABLE_LEVELS >= 3 */
26454
26455@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
26456 },
26457
26458 .set_fixmap = native_set_fixmap,
26459+
26460+#ifdef CONFIG_PAX_KERNEXEC
26461+ .pax_open_kernel = native_pax_open_kernel,
26462+ .pax_close_kernel = native_pax_close_kernel,
26463+#endif
26464+
26465 };
26466
26467 EXPORT_SYMBOL_GPL(pv_time_ops);
26468diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
26469index a1da673..b6f5831 100644
26470--- a/arch/x86/kernel/paravirt_patch_64.c
26471+++ b/arch/x86/kernel/paravirt_patch_64.c
26472@@ -9,7 +9,11 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
26473 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
26474 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
26475 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
26476+
26477+#ifndef CONFIG_PAX_MEMORY_UDEREF
26478 DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
26479+#endif
26480+
26481 DEF_NATIVE(pv_cpu_ops, clts, "clts");
26482 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
26483
26484@@ -57,7 +61,11 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
26485 PATCH_SITE(pv_mmu_ops, read_cr3);
26486 PATCH_SITE(pv_mmu_ops, write_cr3);
26487 PATCH_SITE(pv_cpu_ops, clts);
26488+
26489+#ifndef CONFIG_PAX_MEMORY_UDEREF
26490 PATCH_SITE(pv_mmu_ops, flush_tlb_single);
26491+#endif
26492+
26493 PATCH_SITE(pv_cpu_ops, wbinvd);
26494
26495 patch_site:
26496diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
26497index 0497f71..7186c0d 100644
26498--- a/arch/x86/kernel/pci-calgary_64.c
26499+++ b/arch/x86/kernel/pci-calgary_64.c
26500@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
26501 tce_space = be64_to_cpu(readq(target));
26502 tce_space = tce_space & TAR_SW_BITS;
26503
26504- tce_space = tce_space & (~specified_table_size);
26505+ tce_space = tce_space & (~(unsigned long)specified_table_size);
26506 info->tce_space = (u64 *)__va(tce_space);
26507 }
26508 }
26509diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
26510index 35ccf75..7a15747 100644
26511--- a/arch/x86/kernel/pci-iommu_table.c
26512+++ b/arch/x86/kernel/pci-iommu_table.c
26513@@ -2,7 +2,7 @@
26514 #include <asm/iommu_table.h>
26515 #include <linux/string.h>
26516 #include <linux/kallsyms.h>
26517-
26518+#include <linux/sched.h>
26519
26520 #define DEBUG 1
26521
26522diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
26523index 77dd0ad..9ec4723 100644
26524--- a/arch/x86/kernel/pci-swiotlb.c
26525+++ b/arch/x86/kernel/pci-swiotlb.c
26526@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
26527 struct dma_attrs *attrs)
26528 {
26529 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
26530- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
26531+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
26532 else
26533 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
26534 }
26535diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
26536index a388bb8..97064ad 100644
26537--- a/arch/x86/kernel/process.c
26538+++ b/arch/x86/kernel/process.c
26539@@ -38,7 +38,8 @@
26540 * section. Since TSS's are completely CPU-local, we want them
26541 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
26542 */
26543-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
26544+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
26545+EXPORT_SYMBOL(init_tss);
26546
26547 #ifdef CONFIG_X86_64
26548 static DEFINE_PER_CPU(unsigned char, is_idle);
26549@@ -96,7 +97,7 @@ void arch_task_cache_init(void)
26550 task_xstate_cachep =
26551 kmem_cache_create("task_xstate", xstate_size,
26552 __alignof__(union thread_xstate),
26553- SLAB_PANIC | SLAB_NOTRACK, NULL);
26554+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
26555 setup_xstate_comp();
26556 }
26557
26558@@ -110,7 +111,7 @@ void exit_thread(void)
26559 unsigned long *bp = t->io_bitmap_ptr;
26560
26561 if (bp) {
26562- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
26563+ struct tss_struct *tss = init_tss + get_cpu();
26564
26565 t->io_bitmap_ptr = NULL;
26566 clear_thread_flag(TIF_IO_BITMAP);
26567@@ -130,6 +131,9 @@ void flush_thread(void)
26568 {
26569 struct task_struct *tsk = current;
26570
26571+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
26572+ loadsegment(gs, 0);
26573+#endif
26574 flush_ptrace_hw_breakpoint(tsk);
26575 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
26576 drop_init_fpu(tsk);
26577@@ -276,7 +280,7 @@ static void __exit_idle(void)
26578 void exit_idle(void)
26579 {
26580 /* idle loop has pid 0 */
26581- if (current->pid)
26582+ if (task_pid_nr(current))
26583 return;
26584 __exit_idle();
26585 }
26586@@ -329,7 +333,7 @@ bool xen_set_default_idle(void)
26587 return ret;
26588 }
26589 #endif
26590-void stop_this_cpu(void *dummy)
26591+__noreturn void stop_this_cpu(void *dummy)
26592 {
26593 local_irq_disable();
26594 /*
26595@@ -508,16 +512,37 @@ static int __init idle_setup(char *str)
26596 }
26597 early_param("idle", idle_setup);
26598
26599-unsigned long arch_align_stack(unsigned long sp)
26600+#ifdef CONFIG_PAX_RANDKSTACK
26601+void pax_randomize_kstack(struct pt_regs *regs)
26602 {
26603- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26604- sp -= get_random_int() % 8192;
26605- return sp & ~0xf;
26606-}
26607+ struct thread_struct *thread = &current->thread;
26608+ unsigned long time;
26609
26610-unsigned long arch_randomize_brk(struct mm_struct *mm)
26611-{
26612- unsigned long range_end = mm->brk + 0x02000000;
26613- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26614-}
26615+ if (!randomize_va_space)
26616+ return;
26617+
26618+ if (v8086_mode(regs))
26619+ return;
26620
26621+ rdtscl(time);
26622+
26623+ /* P4 seems to return a 0 LSB, ignore it */
26624+#ifdef CONFIG_MPENTIUM4
26625+ time &= 0x3EUL;
26626+ time <<= 2;
26627+#elif defined(CONFIG_X86_64)
26628+ time &= 0xFUL;
26629+ time <<= 4;
26630+#else
26631+ time &= 0x1FUL;
26632+ time <<= 3;
26633+#endif
26634+
26635+ thread->sp0 ^= time;
26636+ load_sp0(init_tss + smp_processor_id(), thread);
26637+
26638+#ifdef CONFIG_X86_64
26639+ this_cpu_write(kernel_stack, thread->sp0);
26640+#endif
26641+}
26642+#endif
26643diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
26644index 603c4f9..3a105d7 100644
26645--- a/arch/x86/kernel/process_32.c
26646+++ b/arch/x86/kernel/process_32.c
26647@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
26648 unsigned long thread_saved_pc(struct task_struct *tsk)
26649 {
26650 return ((unsigned long *)tsk->thread.sp)[3];
26651+//XXX return tsk->thread.eip;
26652 }
26653
26654 void __show_regs(struct pt_regs *regs, int all)
26655@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
26656 unsigned long sp;
26657 unsigned short ss, gs;
26658
26659- if (user_mode_vm(regs)) {
26660+ if (user_mode(regs)) {
26661 sp = regs->sp;
26662 ss = regs->ss & 0xffff;
26663- gs = get_user_gs(regs);
26664 } else {
26665 sp = kernel_stack_pointer(regs);
26666 savesegment(ss, ss);
26667- savesegment(gs, gs);
26668 }
26669+ gs = get_user_gs(regs);
26670
26671 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
26672 (u16)regs->cs, regs->ip, regs->flags,
26673- smp_processor_id());
26674+ raw_smp_processor_id());
26675 print_symbol("EIP is at %s\n", regs->ip);
26676
26677 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26678@@ -132,21 +132,22 @@ void release_thread(struct task_struct *dead_task)
26679 int copy_thread(unsigned long clone_flags, unsigned long sp,
26680 unsigned long arg, struct task_struct *p)
26681 {
26682- struct pt_regs *childregs = task_pt_regs(p);
26683+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
26684 struct task_struct *tsk;
26685 int err;
26686
26687 p->thread.sp = (unsigned long) childregs;
26688 p->thread.sp0 = (unsigned long) (childregs+1);
26689+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26690 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26691
26692 if (unlikely(p->flags & PF_KTHREAD)) {
26693 /* kernel thread */
26694 memset(childregs, 0, sizeof(struct pt_regs));
26695 p->thread.ip = (unsigned long) ret_from_kernel_thread;
26696- task_user_gs(p) = __KERNEL_STACK_CANARY;
26697- childregs->ds = __USER_DS;
26698- childregs->es = __USER_DS;
26699+ savesegment(gs, childregs->gs);
26700+ childregs->ds = __KERNEL_DS;
26701+ childregs->es = __KERNEL_DS;
26702 childregs->fs = __KERNEL_PERCPU;
26703 childregs->bx = sp; /* function */
26704 childregs->bp = arg;
26705@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26706 struct thread_struct *prev = &prev_p->thread,
26707 *next = &next_p->thread;
26708 int cpu = smp_processor_id();
26709- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26710+ struct tss_struct *tss = init_tss + cpu;
26711 fpu_switch_t fpu;
26712
26713 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
26714@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26715 */
26716 lazy_save_gs(prev->gs);
26717
26718+#ifdef CONFIG_PAX_MEMORY_UDEREF
26719+ __set_fs(task_thread_info(next_p)->addr_limit);
26720+#endif
26721+
26722 /*
26723 * Load the per-thread Thread-Local Storage descriptor.
26724 */
26725@@ -310,9 +315,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26726 */
26727 arch_end_context_switch(next_p);
26728
26729- this_cpu_write(kernel_stack,
26730- (unsigned long)task_stack_page(next_p) +
26731- THREAD_SIZE - KERNEL_STACK_OFFSET);
26732+ this_cpu_write(current_task, next_p);
26733+ this_cpu_write(current_tinfo, &next_p->tinfo);
26734+ this_cpu_write(kernel_stack, next->sp0);
26735
26736 /*
26737 * Restore %gs if needed (which is common)
26738@@ -322,8 +327,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26739
26740 switch_fpu_finish(next_p, fpu);
26741
26742- this_cpu_write(current_task, next_p);
26743-
26744 return prev_p;
26745 }
26746
26747@@ -353,4 +356,3 @@ unsigned long get_wchan(struct task_struct *p)
26748 } while (count++ < 16);
26749 return 0;
26750 }
26751-
26752diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
26753index 67fcc43..0d2c630 100644
26754--- a/arch/x86/kernel/process_64.c
26755+++ b/arch/x86/kernel/process_64.c
26756@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26757 struct pt_regs *childregs;
26758 struct task_struct *me = current;
26759
26760- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
26761+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
26762 childregs = task_pt_regs(p);
26763 p->thread.sp = (unsigned long) childregs;
26764 p->thread.usersp = me->thread.usersp;
26765+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26766 set_tsk_thread_flag(p, TIF_FORK);
26767 p->thread.io_bitmap_ptr = NULL;
26768
26769@@ -171,6 +172,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26770 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
26771 savesegment(es, p->thread.es);
26772 savesegment(ds, p->thread.ds);
26773+ savesegment(ss, p->thread.ss);
26774+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
26775 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26776
26777 if (unlikely(p->flags & PF_KTHREAD)) {
26778@@ -277,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26779 struct thread_struct *prev = &prev_p->thread;
26780 struct thread_struct *next = &next_p->thread;
26781 int cpu = smp_processor_id();
26782- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26783+ struct tss_struct *tss = init_tss + cpu;
26784 unsigned fsindex, gsindex;
26785 fpu_switch_t fpu;
26786
26787@@ -331,6 +334,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26788 if (unlikely(next->ds | prev->ds))
26789 loadsegment(ds, next->ds);
26790
26791+ savesegment(ss, prev->ss);
26792+ if (unlikely(next->ss != prev->ss))
26793+ loadsegment(ss, next->ss);
26794+
26795 /*
26796 * Switch FS and GS.
26797 *
26798@@ -404,6 +411,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26799 prev->usersp = this_cpu_read(old_rsp);
26800 this_cpu_write(old_rsp, next->usersp);
26801 this_cpu_write(current_task, next_p);
26802+ this_cpu_write(current_tinfo, &next_p->tinfo);
26803
26804 /*
26805 * If it were not for PREEMPT_ACTIVE we could guarantee that the
26806@@ -413,9 +421,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26807 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
26808 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
26809
26810- this_cpu_write(kernel_stack,
26811- (unsigned long)task_stack_page(next_p) +
26812- THREAD_SIZE - KERNEL_STACK_OFFSET);
26813+ this_cpu_write(kernel_stack, next->sp0);
26814
26815 /*
26816 * Now maybe reload the debug registers and handle I/O bitmaps
26817@@ -485,12 +491,11 @@ unsigned long get_wchan(struct task_struct *p)
26818 if (!p || p == current || p->state == TASK_RUNNING)
26819 return 0;
26820 stack = (unsigned long)task_stack_page(p);
26821- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
26822+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
26823 return 0;
26824 fp = *(u64 *)(p->thread.sp);
26825 do {
26826- if (fp < (unsigned long)stack ||
26827- fp >= (unsigned long)stack+THREAD_SIZE)
26828+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
26829 return 0;
26830 ip = *(u64 *)(fp+8);
26831 if (!in_sched_functions(ip))
26832diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
26833index e510618..5165ac0 100644
26834--- a/arch/x86/kernel/ptrace.c
26835+++ b/arch/x86/kernel/ptrace.c
26836@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
26837 unsigned long sp = (unsigned long)&regs->sp;
26838 u32 *prev_esp;
26839
26840- if (context == (sp & ~(THREAD_SIZE - 1)))
26841+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
26842 return sp;
26843
26844- prev_esp = (u32 *)(context);
26845+ prev_esp = *(u32 **)(context);
26846 if (prev_esp)
26847 return (unsigned long)prev_esp;
26848
26849@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
26850 if (child->thread.gs != value)
26851 return do_arch_prctl(child, ARCH_SET_GS, value);
26852 return 0;
26853+
26854+ case offsetof(struct user_regs_struct,ip):
26855+ /*
26856+ * Protect against any attempt to set ip to an
26857+ * impossible address. There are dragons lurking if the
26858+ * address is noncanonical. (This explicitly allows
26859+ * setting ip to TASK_SIZE_MAX, because user code can do
26860+ * that all by itself by running off the end of its
26861+ * address space.
26862+ */
26863+ if (value > TASK_SIZE_MAX)
26864+ return -EIO;
26865+ break;
26866+
26867 #endif
26868 }
26869
26870@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
26871 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
26872 {
26873 int i;
26874- int dr7 = 0;
26875+ unsigned long dr7 = 0;
26876 struct arch_hw_breakpoint *info;
26877
26878 for (i = 0; i < HBP_NUM; i++) {
26879@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
26880 unsigned long addr, unsigned long data)
26881 {
26882 int ret;
26883- unsigned long __user *datap = (unsigned long __user *)data;
26884+ unsigned long __user *datap = (__force unsigned long __user *)data;
26885
26886 switch (request) {
26887 /* read the word at location addr in the USER area. */
26888@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
26889 if ((int) addr < 0)
26890 return -EIO;
26891 ret = do_get_thread_area(child, addr,
26892- (struct user_desc __user *)data);
26893+ (__force struct user_desc __user *) data);
26894 break;
26895
26896 case PTRACE_SET_THREAD_AREA:
26897 if ((int) addr < 0)
26898 return -EIO;
26899 ret = do_set_thread_area(child, addr,
26900- (struct user_desc __user *)data, 0);
26901+ (__force struct user_desc __user *) data, 0);
26902 break;
26903 #endif
26904
26905@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
26906
26907 #ifdef CONFIG_X86_64
26908
26909-static struct user_regset x86_64_regsets[] __read_mostly = {
26910+static user_regset_no_const x86_64_regsets[] __read_only = {
26911 [REGSET_GENERAL] = {
26912 .core_note_type = NT_PRSTATUS,
26913 .n = sizeof(struct user_regs_struct) / sizeof(long),
26914@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
26915 #endif /* CONFIG_X86_64 */
26916
26917 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
26918-static struct user_regset x86_32_regsets[] __read_mostly = {
26919+static user_regset_no_const x86_32_regsets[] __read_only = {
26920 [REGSET_GENERAL] = {
26921 .core_note_type = NT_PRSTATUS,
26922 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
26923@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
26924 */
26925 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
26926
26927-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26928+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26929 {
26930 #ifdef CONFIG_X86_64
26931 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
26932@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
26933 memset(info, 0, sizeof(*info));
26934 info->si_signo = SIGTRAP;
26935 info->si_code = si_code;
26936- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
26937+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
26938 }
26939
26940 void user_single_step_siginfo(struct task_struct *tsk,
26941@@ -1455,6 +1469,10 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
26942 }
26943 }
26944
26945+#ifdef CONFIG_GRKERNSEC_SETXID
26946+extern void gr_delayed_cred_worker(void);
26947+#endif
26948+
26949 /*
26950 * We can return 0 to resume the syscall or anything else to go to phase
26951 * 2. If we resume the syscall, we need to put something appropriate in
26952@@ -1562,6 +1580,11 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
26953
26954 BUG_ON(regs != task_pt_regs(current));
26955
26956+#ifdef CONFIG_GRKERNSEC_SETXID
26957+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26958+ gr_delayed_cred_worker();
26959+#endif
26960+
26961 /*
26962 * If we stepped into a sysenter/syscall insn, it trapped in
26963 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
26964@@ -1620,6 +1643,11 @@ void syscall_trace_leave(struct pt_regs *regs)
26965 */
26966 user_exit();
26967
26968+#ifdef CONFIG_GRKERNSEC_SETXID
26969+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26970+ gr_delayed_cred_worker();
26971+#endif
26972+
26973 audit_syscall_exit(regs);
26974
26975 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
26976diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
26977index e5ecd20..60f7eef 100644
26978--- a/arch/x86/kernel/pvclock.c
26979+++ b/arch/x86/kernel/pvclock.c
26980@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
26981 reset_hung_task_detector();
26982 }
26983
26984-static atomic64_t last_value = ATOMIC64_INIT(0);
26985+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
26986
26987 void pvclock_resume(void)
26988 {
26989- atomic64_set(&last_value, 0);
26990+ atomic64_set_unchecked(&last_value, 0);
26991 }
26992
26993 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
26994@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
26995 * updating at the same time, and one of them could be slightly behind,
26996 * making the assumption that last_value always go forward fail to hold.
26997 */
26998- last = atomic64_read(&last_value);
26999+ last = atomic64_read_unchecked(&last_value);
27000 do {
27001 if (ret < last)
27002 return last;
27003- last = atomic64_cmpxchg(&last_value, last, ret);
27004+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
27005 } while (unlikely(last != ret));
27006
27007 return ret;
27008diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
27009index 86db4bc..a50a54a 100644
27010--- a/arch/x86/kernel/reboot.c
27011+++ b/arch/x86/kernel/reboot.c
27012@@ -70,6 +70,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
27013
27014 void __noreturn machine_real_restart(unsigned int type)
27015 {
27016+
27017+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
27018+ struct desc_struct *gdt;
27019+#endif
27020+
27021 local_irq_disable();
27022
27023 /*
27024@@ -97,7 +102,29 @@ void __noreturn machine_real_restart(unsigned int type)
27025
27026 /* Jump to the identity-mapped low memory code */
27027 #ifdef CONFIG_X86_32
27028- asm volatile("jmpl *%0" : :
27029+
27030+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
27031+ gdt = get_cpu_gdt_table(smp_processor_id());
27032+ pax_open_kernel();
27033+#ifdef CONFIG_PAX_MEMORY_UDEREF
27034+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
27035+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
27036+ loadsegment(ds, __KERNEL_DS);
27037+ loadsegment(es, __KERNEL_DS);
27038+ loadsegment(ss, __KERNEL_DS);
27039+#endif
27040+#ifdef CONFIG_PAX_KERNEXEC
27041+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
27042+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
27043+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
27044+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
27045+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
27046+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
27047+#endif
27048+ pax_close_kernel();
27049+#endif
27050+
27051+ asm volatile("ljmpl *%0" : :
27052 "rm" (real_mode_header->machine_real_restart_asm),
27053 "a" (type));
27054 #else
27055@@ -137,7 +164,7 @@ static int __init set_kbd_reboot(const struct dmi_system_id *d)
27056 /*
27057 * This is a single dmi_table handling all reboot quirks.
27058 */
27059-static struct dmi_system_id __initdata reboot_dmi_table[] = {
27060+static const struct dmi_system_id __initconst reboot_dmi_table[] = {
27061
27062 /* Acer */
27063 { /* Handle reboot issue on Acer Aspire one */
27064@@ -511,7 +538,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
27065 * This means that this function can never return, it can misbehave
27066 * by not rebooting properly and hanging.
27067 */
27068-static void native_machine_emergency_restart(void)
27069+static void __noreturn native_machine_emergency_restart(void)
27070 {
27071 int i;
27072 int attempt = 0;
27073@@ -631,13 +658,13 @@ void native_machine_shutdown(void)
27074 #endif
27075 }
27076
27077-static void __machine_emergency_restart(int emergency)
27078+static void __noreturn __machine_emergency_restart(int emergency)
27079 {
27080 reboot_emergency = emergency;
27081 machine_ops.emergency_restart();
27082 }
27083
27084-static void native_machine_restart(char *__unused)
27085+static void __noreturn native_machine_restart(char *__unused)
27086 {
27087 pr_notice("machine restart\n");
27088
27089@@ -646,7 +673,7 @@ static void native_machine_restart(char *__unused)
27090 __machine_emergency_restart(0);
27091 }
27092
27093-static void native_machine_halt(void)
27094+static void __noreturn native_machine_halt(void)
27095 {
27096 /* Stop other cpus and apics */
27097 machine_shutdown();
27098@@ -656,7 +683,7 @@ static void native_machine_halt(void)
27099 stop_this_cpu(NULL);
27100 }
27101
27102-static void native_machine_power_off(void)
27103+static void __noreturn native_machine_power_off(void)
27104 {
27105 if (pm_power_off) {
27106 if (!reboot_force)
27107@@ -665,9 +692,10 @@ static void native_machine_power_off(void)
27108 }
27109 /* A fallback in case there is no PM info available */
27110 tboot_shutdown(TB_SHUTDOWN_HALT);
27111+ unreachable();
27112 }
27113
27114-struct machine_ops machine_ops = {
27115+struct machine_ops machine_ops __read_only = {
27116 .power_off = native_machine_power_off,
27117 .shutdown = native_machine_shutdown,
27118 .emergency_restart = native_machine_emergency_restart,
27119diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27120index c8e41e9..64049ef 100644
27121--- a/arch/x86/kernel/reboot_fixups_32.c
27122+++ b/arch/x86/kernel/reboot_fixups_32.c
27123@@ -57,7 +57,7 @@ struct device_fixup {
27124 unsigned int vendor;
27125 unsigned int device;
27126 void (*reboot_fixup)(struct pci_dev *);
27127-};
27128+} __do_const;
27129
27130 /*
27131 * PCI ids solely used for fixups_table go here
27132diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27133index 3fd2c69..a444264 100644
27134--- a/arch/x86/kernel/relocate_kernel_64.S
27135+++ b/arch/x86/kernel/relocate_kernel_64.S
27136@@ -96,8 +96,7 @@ relocate_kernel:
27137
27138 /* jump to identity mapped page */
27139 addq $(identity_mapped - relocate_kernel), %r8
27140- pushq %r8
27141- ret
27142+ jmp *%r8
27143
27144 identity_mapped:
27145 /* set return address to 0 if not preserving context */
27146diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27147index 0a2421c..11f3f36 100644
27148--- a/arch/x86/kernel/setup.c
27149+++ b/arch/x86/kernel/setup.c
27150@@ -111,6 +111,7 @@
27151 #include <asm/mce.h>
27152 #include <asm/alternative.h>
27153 #include <asm/prom.h>
27154+#include <asm/boot.h>
27155
27156 /*
27157 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27158@@ -206,10 +207,12 @@ EXPORT_SYMBOL(boot_cpu_data);
27159 #endif
27160
27161
27162-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27163-__visible unsigned long mmu_cr4_features;
27164+#ifdef CONFIG_X86_64
27165+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27166+#elif defined(CONFIG_X86_PAE)
27167+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27168 #else
27169-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27170+__visible unsigned long mmu_cr4_features __read_only;
27171 #endif
27172
27173 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27174@@ -771,7 +774,7 @@ static void __init trim_bios_range(void)
27175 * area (640->1Mb) as ram even though it is not.
27176 * take them out.
27177 */
27178- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27179+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27180
27181 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27182 }
27183@@ -779,7 +782,7 @@ static void __init trim_bios_range(void)
27184 /* called before trim_bios_range() to spare extra sanitize */
27185 static void __init e820_add_kernel_range(void)
27186 {
27187- u64 start = __pa_symbol(_text);
27188+ u64 start = __pa_symbol(ktla_ktva(_text));
27189 u64 size = __pa_symbol(_end) - start;
27190
27191 /*
27192@@ -855,8 +858,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27193
27194 void __init setup_arch(char **cmdline_p)
27195 {
27196+#ifdef CONFIG_X86_32
27197+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27198+#else
27199 memblock_reserve(__pa_symbol(_text),
27200 (unsigned long)__bss_stop - (unsigned long)_text);
27201+#endif
27202
27203 early_reserve_initrd();
27204
27205@@ -954,16 +961,16 @@ void __init setup_arch(char **cmdline_p)
27206
27207 if (!boot_params.hdr.root_flags)
27208 root_mountflags &= ~MS_RDONLY;
27209- init_mm.start_code = (unsigned long) _text;
27210- init_mm.end_code = (unsigned long) _etext;
27211+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27212+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27213 init_mm.end_data = (unsigned long) _edata;
27214 init_mm.brk = _brk_end;
27215
27216 mpx_mm_init(&init_mm);
27217
27218- code_resource.start = __pa_symbol(_text);
27219- code_resource.end = __pa_symbol(_etext)-1;
27220- data_resource.start = __pa_symbol(_etext);
27221+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27222+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27223+ data_resource.start = __pa_symbol(_sdata);
27224 data_resource.end = __pa_symbol(_edata)-1;
27225 bss_resource.start = __pa_symbol(__bss_start);
27226 bss_resource.end = __pa_symbol(__bss_stop)-1;
27227diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27228index e4fcb87..9c06c55 100644
27229--- a/arch/x86/kernel/setup_percpu.c
27230+++ b/arch/x86/kernel/setup_percpu.c
27231@@ -21,19 +21,17 @@
27232 #include <asm/cpu.h>
27233 #include <asm/stackprotector.h>
27234
27235-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27236+#ifdef CONFIG_SMP
27237+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27238 EXPORT_PER_CPU_SYMBOL(cpu_number);
27239+#endif
27240
27241-#ifdef CONFIG_X86_64
27242 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27243-#else
27244-#define BOOT_PERCPU_OFFSET 0
27245-#endif
27246
27247 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27248 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27249
27250-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27251+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27252 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27253 };
27254 EXPORT_SYMBOL(__per_cpu_offset);
27255@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27256 {
27257 #ifdef CONFIG_NEED_MULTIPLE_NODES
27258 pg_data_t *last = NULL;
27259- unsigned int cpu;
27260+ int cpu;
27261
27262 for_each_possible_cpu(cpu) {
27263 int node = early_cpu_to_node(cpu);
27264@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27265 {
27266 #ifdef CONFIG_X86_32
27267 struct desc_struct gdt;
27268+ unsigned long base = per_cpu_offset(cpu);
27269
27270- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27271- 0x2 | DESCTYPE_S, 0x8);
27272- gdt.s = 1;
27273+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27274+ 0x83 | DESCTYPE_S, 0xC);
27275 write_gdt_entry(get_cpu_gdt_table(cpu),
27276 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27277 #endif
27278@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27279 /* alrighty, percpu areas up and running */
27280 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27281 for_each_possible_cpu(cpu) {
27282+#ifdef CONFIG_CC_STACKPROTECTOR
27283+#ifdef CONFIG_X86_32
27284+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27285+#endif
27286+#endif
27287 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27288 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27289 per_cpu(cpu_number, cpu) = cpu;
27290@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27291 */
27292 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27293 #endif
27294+#ifdef CONFIG_CC_STACKPROTECTOR
27295+#ifdef CONFIG_X86_32
27296+ if (!cpu)
27297+ per_cpu(stack_canary.canary, cpu) = canary;
27298+#endif
27299+#endif
27300 /*
27301 * Up to this point, the boot CPU has been using .init.data
27302 * area. Reload any changed state for the boot CPU.
27303diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27304index e504246..ba10432 100644
27305--- a/arch/x86/kernel/signal.c
27306+++ b/arch/x86/kernel/signal.c
27307@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27308 * Align the stack pointer according to the i386 ABI,
27309 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27310 */
27311- sp = ((sp + 4) & -16ul) - 4;
27312+ sp = ((sp - 12) & -16ul) - 4;
27313 #else /* !CONFIG_X86_32 */
27314 sp = round_down(sp, 16) - 8;
27315 #endif
27316@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27317 }
27318
27319 if (current->mm->context.vdso)
27320- restorer = current->mm->context.vdso +
27321- selected_vdso32->sym___kernel_sigreturn;
27322+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27323 else
27324- restorer = &frame->retcode;
27325+ restorer = (void __user *)&frame->retcode;
27326 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27327 restorer = ksig->ka.sa.sa_restorer;
27328
27329@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27330 * reasons and because gdb uses it as a signature to notice
27331 * signal handler stack frames.
27332 */
27333- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27334+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27335
27336 if (err)
27337 return -EFAULT;
27338@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27339 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27340
27341 /* Set up to return from userspace. */
27342- restorer = current->mm->context.vdso +
27343- selected_vdso32->sym___kernel_rt_sigreturn;
27344+ if (current->mm->context.vdso)
27345+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
27346+ else
27347+ restorer = (void __user *)&frame->retcode;
27348 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27349 restorer = ksig->ka.sa.sa_restorer;
27350 put_user_ex(restorer, &frame->pretcode);
27351@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27352 * reasons and because gdb uses it as a signature to notice
27353 * signal handler stack frames.
27354 */
27355- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27356+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27357 } put_user_catch(err);
27358
27359 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
27360@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27361 {
27362 int usig = signr_convert(ksig->sig);
27363 sigset_t *set = sigmask_to_save();
27364- compat_sigset_t *cset = (compat_sigset_t *) set;
27365+ sigset_t sigcopy;
27366+ compat_sigset_t *cset;
27367+
27368+ sigcopy = *set;
27369+
27370+ cset = (compat_sigset_t *) &sigcopy;
27371
27372 /* Set up the stack frame */
27373 if (is_ia32_frame()) {
27374@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27375 } else if (is_x32_frame()) {
27376 return x32_setup_rt_frame(ksig, cset, regs);
27377 } else {
27378- return __setup_rt_frame(ksig->sig, ksig, set, regs);
27379+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
27380 }
27381 }
27382
27383diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
27384index be8e1bd..a3d93fa 100644
27385--- a/arch/x86/kernel/smp.c
27386+++ b/arch/x86/kernel/smp.c
27387@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
27388
27389 __setup("nonmi_ipi", nonmi_ipi_setup);
27390
27391-struct smp_ops smp_ops = {
27392+struct smp_ops smp_ops __read_only = {
27393 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
27394 .smp_prepare_cpus = native_smp_prepare_cpus,
27395 .smp_cpus_done = native_smp_cpus_done,
27396diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
27397index febc6aa..37d8edf 100644
27398--- a/arch/x86/kernel/smpboot.c
27399+++ b/arch/x86/kernel/smpboot.c
27400@@ -229,14 +229,17 @@ static void notrace start_secondary(void *unused)
27401
27402 enable_start_cpu0 = 0;
27403
27404-#ifdef CONFIG_X86_32
27405+ /* otherwise gcc will move up smp_processor_id before the cpu_init */
27406+ barrier();
27407+
27408 /* switch away from the initial page table */
27409+#ifdef CONFIG_PAX_PER_CPU_PGD
27410+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
27411+#else
27412 load_cr3(swapper_pg_dir);
27413+#endif
27414 __flush_tlb_all();
27415-#endif
27416
27417- /* otherwise gcc will move up smp_processor_id before the cpu_init */
27418- barrier();
27419 /*
27420 * Check TSC synchronization with the BP:
27421 */
27422@@ -800,8 +803,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27423 alternatives_enable_smp();
27424
27425 idle->thread.sp = (unsigned long) (((struct pt_regs *)
27426- (THREAD_SIZE + task_stack_page(idle))) - 1);
27427+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
27428 per_cpu(current_task, cpu) = idle;
27429+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27430
27431 #ifdef CONFIG_X86_32
27432 /* Stack for startup_32 can be just as for start_secondary onwards */
27433@@ -810,10 +814,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27434 clear_tsk_thread_flag(idle, TIF_FORK);
27435 initial_gs = per_cpu_offset(cpu);
27436 #endif
27437- per_cpu(kernel_stack, cpu) =
27438- (unsigned long)task_stack_page(idle) -
27439- KERNEL_STACK_OFFSET + THREAD_SIZE;
27440+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27441+ pax_open_kernel();
27442 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
27443+ pax_close_kernel();
27444 initial_code = (unsigned long)start_secondary;
27445 stack_start = idle->thread.sp;
27446
27447@@ -953,6 +957,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
27448 /* the FPU context is blank, nobody can own it */
27449 __cpu_disable_lazy_restore(cpu);
27450
27451+#ifdef CONFIG_PAX_PER_CPU_PGD
27452+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
27453+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27454+ KERNEL_PGD_PTRS);
27455+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
27456+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27457+ KERNEL_PGD_PTRS);
27458+#endif
27459+
27460 err = do_boot_cpu(apicid, cpu, tidle);
27461 if (err) {
27462 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
27463diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
27464index 9b4d51d..5d28b58 100644
27465--- a/arch/x86/kernel/step.c
27466+++ b/arch/x86/kernel/step.c
27467@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27468 struct desc_struct *desc;
27469 unsigned long base;
27470
27471- seg &= ~7UL;
27472+ seg >>= 3;
27473
27474 mutex_lock(&child->mm->context.lock);
27475- if (unlikely((seg >> 3) >= child->mm->context.size))
27476+ if (unlikely(seg >= child->mm->context.size))
27477 addr = -1L; /* bogus selector, access would fault */
27478 else {
27479 desc = child->mm->context.ldt + seg;
27480@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27481 addr += base;
27482 }
27483 mutex_unlock(&child->mm->context.lock);
27484- }
27485+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
27486+ addr = ktla_ktva(addr);
27487
27488 return addr;
27489 }
27490@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
27491 unsigned char opcode[15];
27492 unsigned long addr = convert_ip_to_linear(child, regs);
27493
27494+ if (addr == -EINVAL)
27495+ return 0;
27496+
27497 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
27498 for (i = 0; i < copied; i++) {
27499 switch (opcode[i]) {
27500diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
27501new file mode 100644
27502index 0000000..5877189
27503--- /dev/null
27504+++ b/arch/x86/kernel/sys_i386_32.c
27505@@ -0,0 +1,189 @@
27506+/*
27507+ * This file contains various random system calls that
27508+ * have a non-standard calling sequence on the Linux/i386
27509+ * platform.
27510+ */
27511+
27512+#include <linux/errno.h>
27513+#include <linux/sched.h>
27514+#include <linux/mm.h>
27515+#include <linux/fs.h>
27516+#include <linux/smp.h>
27517+#include <linux/sem.h>
27518+#include <linux/msg.h>
27519+#include <linux/shm.h>
27520+#include <linux/stat.h>
27521+#include <linux/syscalls.h>
27522+#include <linux/mman.h>
27523+#include <linux/file.h>
27524+#include <linux/utsname.h>
27525+#include <linux/ipc.h>
27526+#include <linux/elf.h>
27527+
27528+#include <linux/uaccess.h>
27529+#include <linux/unistd.h>
27530+
27531+#include <asm/syscalls.h>
27532+
27533+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
27534+{
27535+ unsigned long pax_task_size = TASK_SIZE;
27536+
27537+#ifdef CONFIG_PAX_SEGMEXEC
27538+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
27539+ pax_task_size = SEGMEXEC_TASK_SIZE;
27540+#endif
27541+
27542+ if (flags & MAP_FIXED)
27543+ if (len > pax_task_size || addr > pax_task_size - len)
27544+ return -EINVAL;
27545+
27546+ return 0;
27547+}
27548+
27549+/*
27550+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
27551+ */
27552+static unsigned long get_align_mask(void)
27553+{
27554+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
27555+ return 0;
27556+
27557+ if (!(current->flags & PF_RANDOMIZE))
27558+ return 0;
27559+
27560+ return va_align.mask;
27561+}
27562+
27563+unsigned long
27564+arch_get_unmapped_area(struct file *filp, unsigned long addr,
27565+ unsigned long len, unsigned long pgoff, unsigned long flags)
27566+{
27567+ struct mm_struct *mm = current->mm;
27568+ struct vm_area_struct *vma;
27569+ unsigned long pax_task_size = TASK_SIZE;
27570+ struct vm_unmapped_area_info info;
27571+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27572+
27573+#ifdef CONFIG_PAX_SEGMEXEC
27574+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27575+ pax_task_size = SEGMEXEC_TASK_SIZE;
27576+#endif
27577+
27578+ pax_task_size -= PAGE_SIZE;
27579+
27580+ if (len > pax_task_size)
27581+ return -ENOMEM;
27582+
27583+ if (flags & MAP_FIXED)
27584+ return addr;
27585+
27586+#ifdef CONFIG_PAX_RANDMMAP
27587+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27588+#endif
27589+
27590+ if (addr) {
27591+ addr = PAGE_ALIGN(addr);
27592+ if (pax_task_size - len >= addr) {
27593+ vma = find_vma(mm, addr);
27594+ if (check_heap_stack_gap(vma, addr, len, offset))
27595+ return addr;
27596+ }
27597+ }
27598+
27599+ info.flags = 0;
27600+ info.length = len;
27601+ info.align_mask = filp ? get_align_mask() : 0;
27602+ info.align_offset = pgoff << PAGE_SHIFT;
27603+ info.threadstack_offset = offset;
27604+
27605+#ifdef CONFIG_PAX_PAGEEXEC
27606+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27607+ info.low_limit = 0x00110000UL;
27608+ info.high_limit = mm->start_code;
27609+
27610+#ifdef CONFIG_PAX_RANDMMAP
27611+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27612+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27613+#endif
27614+
27615+ if (info.low_limit < info.high_limit) {
27616+ addr = vm_unmapped_area(&info);
27617+ if (!IS_ERR_VALUE(addr))
27618+ return addr;
27619+ }
27620+ } else
27621+#endif
27622+
27623+ info.low_limit = mm->mmap_base;
27624+ info.high_limit = pax_task_size;
27625+
27626+ return vm_unmapped_area(&info);
27627+}
27628+
27629+unsigned long
27630+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27631+ const unsigned long len, const unsigned long pgoff,
27632+ const unsigned long flags)
27633+{
27634+ struct vm_area_struct *vma;
27635+ struct mm_struct *mm = current->mm;
27636+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
27637+ struct vm_unmapped_area_info info;
27638+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27639+
27640+#ifdef CONFIG_PAX_SEGMEXEC
27641+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27642+ pax_task_size = SEGMEXEC_TASK_SIZE;
27643+#endif
27644+
27645+ pax_task_size -= PAGE_SIZE;
27646+
27647+ /* requested length too big for entire address space */
27648+ if (len > pax_task_size)
27649+ return -ENOMEM;
27650+
27651+ if (flags & MAP_FIXED)
27652+ return addr;
27653+
27654+#ifdef CONFIG_PAX_PAGEEXEC
27655+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
27656+ goto bottomup;
27657+#endif
27658+
27659+#ifdef CONFIG_PAX_RANDMMAP
27660+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27661+#endif
27662+
27663+ /* requesting a specific address */
27664+ if (addr) {
27665+ addr = PAGE_ALIGN(addr);
27666+ if (pax_task_size - len >= addr) {
27667+ vma = find_vma(mm, addr);
27668+ if (check_heap_stack_gap(vma, addr, len, offset))
27669+ return addr;
27670+ }
27671+ }
27672+
27673+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
27674+ info.length = len;
27675+ info.low_limit = PAGE_SIZE;
27676+ info.high_limit = mm->mmap_base;
27677+ info.align_mask = filp ? get_align_mask() : 0;
27678+ info.align_offset = pgoff << PAGE_SHIFT;
27679+ info.threadstack_offset = offset;
27680+
27681+ addr = vm_unmapped_area(&info);
27682+ if (!(addr & ~PAGE_MASK))
27683+ return addr;
27684+ VM_BUG_ON(addr != -ENOMEM);
27685+
27686+bottomup:
27687+ /*
27688+ * A failed mmap() very likely causes application failure,
27689+ * so fall back to the bottom-up function here. This scenario
27690+ * can happen with large stack limits and large mmap()
27691+ * allocations.
27692+ */
27693+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
27694+}
27695diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
27696index 30277e2..5664a29 100644
27697--- a/arch/x86/kernel/sys_x86_64.c
27698+++ b/arch/x86/kernel/sys_x86_64.c
27699@@ -81,8 +81,8 @@ out:
27700 return error;
27701 }
27702
27703-static void find_start_end(unsigned long flags, unsigned long *begin,
27704- unsigned long *end)
27705+static void find_start_end(struct mm_struct *mm, unsigned long flags,
27706+ unsigned long *begin, unsigned long *end)
27707 {
27708 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
27709 unsigned long new_begin;
27710@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
27711 *begin = new_begin;
27712 }
27713 } else {
27714- *begin = current->mm->mmap_legacy_base;
27715+ *begin = mm->mmap_legacy_base;
27716 *end = TASK_SIZE;
27717 }
27718 }
27719@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27720 struct vm_area_struct *vma;
27721 struct vm_unmapped_area_info info;
27722 unsigned long begin, end;
27723+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27724
27725 if (flags & MAP_FIXED)
27726 return addr;
27727
27728- find_start_end(flags, &begin, &end);
27729+ find_start_end(mm, flags, &begin, &end);
27730
27731 if (len > end)
27732 return -ENOMEM;
27733
27734+#ifdef CONFIG_PAX_RANDMMAP
27735+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27736+#endif
27737+
27738 if (addr) {
27739 addr = PAGE_ALIGN(addr);
27740 vma = find_vma(mm, addr);
27741- if (end - len >= addr &&
27742- (!vma || addr + len <= vma->vm_start))
27743+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27744 return addr;
27745 }
27746
27747@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27748 info.high_limit = end;
27749 info.align_mask = filp ? get_align_mask() : 0;
27750 info.align_offset = pgoff << PAGE_SHIFT;
27751+ info.threadstack_offset = offset;
27752 return vm_unmapped_area(&info);
27753 }
27754
27755@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27756 struct mm_struct *mm = current->mm;
27757 unsigned long addr = addr0;
27758 struct vm_unmapped_area_info info;
27759+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27760
27761 /* requested length too big for entire address space */
27762 if (len > TASK_SIZE)
27763@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27764 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
27765 goto bottomup;
27766
27767+#ifdef CONFIG_PAX_RANDMMAP
27768+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27769+#endif
27770+
27771 /* requesting a specific address */
27772 if (addr) {
27773 addr = PAGE_ALIGN(addr);
27774 vma = find_vma(mm, addr);
27775- if (TASK_SIZE - len >= addr &&
27776- (!vma || addr + len <= vma->vm_start))
27777+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27778 return addr;
27779 }
27780
27781@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27782 info.high_limit = mm->mmap_base;
27783 info.align_mask = filp ? get_align_mask() : 0;
27784 info.align_offset = pgoff << PAGE_SHIFT;
27785+ info.threadstack_offset = offset;
27786 addr = vm_unmapped_area(&info);
27787 if (!(addr & ~PAGE_MASK))
27788 return addr;
27789diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
27790index 91a4496..42fc304 100644
27791--- a/arch/x86/kernel/tboot.c
27792+++ b/arch/x86/kernel/tboot.c
27793@@ -44,6 +44,7 @@
27794 #include <asm/setup.h>
27795 #include <asm/e820.h>
27796 #include <asm/io.h>
27797+#include <asm/tlbflush.h>
27798
27799 #include "../realmode/rm/wakeup.h"
27800
27801@@ -221,7 +222,7 @@ static int tboot_setup_sleep(void)
27802
27803 void tboot_shutdown(u32 shutdown_type)
27804 {
27805- void (*shutdown)(void);
27806+ void (* __noreturn shutdown)(void);
27807
27808 if (!tboot_enabled())
27809 return;
27810@@ -242,8 +243,9 @@ void tboot_shutdown(u32 shutdown_type)
27811 tboot->shutdown_type = shutdown_type;
27812
27813 switch_to_tboot_pt();
27814+ cr4_clear_bits(X86_CR4_PCIDE);
27815
27816- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
27817+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
27818 shutdown();
27819
27820 /* should not reach here */
27821@@ -310,7 +312,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
27822 return -ENODEV;
27823 }
27824
27825-static atomic_t ap_wfs_count;
27826+static atomic_unchecked_t ap_wfs_count;
27827
27828 static int tboot_wait_for_aps(int num_aps)
27829 {
27830@@ -334,9 +336,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
27831 {
27832 switch (action) {
27833 case CPU_DYING:
27834- atomic_inc(&ap_wfs_count);
27835+ atomic_inc_unchecked(&ap_wfs_count);
27836 if (num_online_cpus() == 1)
27837- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
27838+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
27839 return NOTIFY_BAD;
27840 break;
27841 }
27842@@ -422,7 +424,7 @@ static __init int tboot_late_init(void)
27843
27844 tboot_create_trampoline();
27845
27846- atomic_set(&ap_wfs_count, 0);
27847+ atomic_set_unchecked(&ap_wfs_count, 0);
27848 register_hotcpu_notifier(&tboot_cpu_notifier);
27849
27850 #ifdef CONFIG_DEBUG_FS
27851diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
27852index 25adc0e..1df4349 100644
27853--- a/arch/x86/kernel/time.c
27854+++ b/arch/x86/kernel/time.c
27855@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
27856 {
27857 unsigned long pc = instruction_pointer(regs);
27858
27859- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
27860+ if (!user_mode(regs) && in_lock_functions(pc)) {
27861 #ifdef CONFIG_FRAME_POINTER
27862- return *(unsigned long *)(regs->bp + sizeof(long));
27863+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
27864 #else
27865 unsigned long *sp =
27866 (unsigned long *)kernel_stack_pointer(regs);
27867@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
27868 * or above a saved flags. Eflags has bits 22-31 zero,
27869 * kernel addresses don't.
27870 */
27871+
27872+#ifdef CONFIG_PAX_KERNEXEC
27873+ return ktla_ktva(sp[0]);
27874+#else
27875 if (sp[0] >> 22)
27876 return sp[0];
27877 if (sp[1] >> 22)
27878 return sp[1];
27879 #endif
27880+
27881+#endif
27882 }
27883 return pc;
27884 }
27885diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
27886index 7fc5e84..c6e445a 100644
27887--- a/arch/x86/kernel/tls.c
27888+++ b/arch/x86/kernel/tls.c
27889@@ -139,6 +139,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
27890 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
27891 return -EINVAL;
27892
27893+#ifdef CONFIG_PAX_SEGMEXEC
27894+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
27895+ return -EINVAL;
27896+#endif
27897+
27898 set_tls_desc(p, idx, &info, 1);
27899
27900 return 0;
27901@@ -256,7 +261,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
27902
27903 if (kbuf)
27904 info = kbuf;
27905- else if (__copy_from_user(infobuf, ubuf, count))
27906+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
27907 return -EFAULT;
27908 else
27909 info = infobuf;
27910diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
27911index 1c113db..287b42e 100644
27912--- a/arch/x86/kernel/tracepoint.c
27913+++ b/arch/x86/kernel/tracepoint.c
27914@@ -9,11 +9,11 @@
27915 #include <linux/atomic.h>
27916
27917 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
27918-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27919+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27920 (unsigned long) trace_idt_table };
27921
27922 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27923-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
27924+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
27925
27926 static int trace_irq_vector_refcount;
27927 static DEFINE_MUTEX(irq_vector_mutex);
27928diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
27929index 4ff5d16..736e3e1 100644
27930--- a/arch/x86/kernel/traps.c
27931+++ b/arch/x86/kernel/traps.c
27932@@ -68,7 +68,7 @@
27933 #include <asm/proto.h>
27934
27935 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27936-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
27937+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
27938 #else
27939 #include <asm/processor-flags.h>
27940 #include <asm/setup.h>
27941@@ -77,7 +77,7 @@ asmlinkage int system_call(void);
27942 #endif
27943
27944 /* Must be page-aligned because the real IDT is used in a fixmap. */
27945-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
27946+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
27947
27948 DECLARE_BITMAP(used_vectors, NR_VECTORS);
27949 EXPORT_SYMBOL_GPL(used_vectors);
27950@@ -112,7 +112,7 @@ enum ctx_state ist_enter(struct pt_regs *regs)
27951 {
27952 enum ctx_state prev_state;
27953
27954- if (user_mode_vm(regs)) {
27955+ if (user_mode(regs)) {
27956 /* Other than that, we're just an exception. */
27957 prev_state = exception_enter();
27958 } else {
27959@@ -146,7 +146,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
27960 /* Must be before exception_exit. */
27961 preempt_count_sub(HARDIRQ_OFFSET);
27962
27963- if (user_mode_vm(regs))
27964+ if (user_mode(regs))
27965 return exception_exit(prev_state);
27966 else
27967 rcu_nmi_exit();
27968@@ -158,7 +158,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
27969 *
27970 * IST exception handlers normally cannot schedule. As a special
27971 * exception, if the exception interrupted userspace code (i.e.
27972- * user_mode_vm(regs) would return true) and the exception was not
27973+ * user_mode(regs) would return true) and the exception was not
27974 * a double fault, it can be safe to schedule. ist_begin_non_atomic()
27975 * begins a non-atomic section within an ist_enter()/ist_exit() region.
27976 * Callers are responsible for enabling interrupts themselves inside
27977@@ -167,7 +167,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
27978 */
27979 void ist_begin_non_atomic(struct pt_regs *regs)
27980 {
27981- BUG_ON(!user_mode_vm(regs));
27982+ BUG_ON(!user_mode(regs));
27983
27984 /*
27985 * Sanity check: we need to be on the normal thread stack. This
27986@@ -191,11 +191,11 @@ void ist_end_non_atomic(void)
27987 }
27988
27989 static nokprobe_inline int
27990-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
27991+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
27992 struct pt_regs *regs, long error_code)
27993 {
27994 #ifdef CONFIG_X86_32
27995- if (regs->flags & X86_VM_MASK) {
27996+ if (v8086_mode(regs)) {
27997 /*
27998 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
27999 * On nmi (interrupt 2), do_trap should not be called.
28000@@ -208,12 +208,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28001 return -1;
28002 }
28003 #endif
28004- if (!user_mode(regs)) {
28005+ if (!user_mode_novm(regs)) {
28006 if (!fixup_exception(regs)) {
28007 tsk->thread.error_code = error_code;
28008 tsk->thread.trap_nr = trapnr;
28009+
28010+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28011+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
28012+ str = "PAX: suspicious stack segment fault";
28013+#endif
28014+
28015 die(str, regs, error_code);
28016 }
28017+
28018+#ifdef CONFIG_PAX_REFCOUNT
28019+ if (trapnr == X86_TRAP_OF)
28020+ pax_report_refcount_overflow(regs);
28021+#endif
28022+
28023 return 0;
28024 }
28025
28026@@ -252,7 +264,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
28027 }
28028
28029 static void
28030-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28031+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
28032 long error_code, siginfo_t *info)
28033 {
28034 struct task_struct *tsk = current;
28035@@ -276,7 +288,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28036 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
28037 printk_ratelimit()) {
28038 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
28039- tsk->comm, tsk->pid, str,
28040+ tsk->comm, task_pid_nr(tsk), str,
28041 regs->ip, regs->sp, error_code);
28042 print_vma_addr(" in ", regs->ip);
28043 pr_cont("\n");
28044@@ -358,6 +370,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
28045 tsk->thread.error_code = error_code;
28046 tsk->thread.trap_nr = X86_TRAP_DF;
28047
28048+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
28049+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
28050+ die("grsec: kernel stack overflow detected", regs, error_code);
28051+#endif
28052+
28053 #ifdef CONFIG_DOUBLEFAULT
28054 df_debug(regs, error_code);
28055 #endif
28056@@ -384,7 +401,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
28057 goto exit;
28058 conditional_sti(regs);
28059
28060- if (!user_mode_vm(regs))
28061+ if (!user_mode(regs))
28062 die("bounds", regs, error_code);
28063
28064 if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
28065@@ -463,7 +480,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
28066 conditional_sti(regs);
28067
28068 #ifdef CONFIG_X86_32
28069- if (regs->flags & X86_VM_MASK) {
28070+ if (v8086_mode(regs)) {
28071 local_irq_enable();
28072 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
28073 goto exit;
28074@@ -471,18 +488,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
28075 #endif
28076
28077 tsk = current;
28078- if (!user_mode(regs)) {
28079+ if (!user_mode_novm(regs)) {
28080 if (fixup_exception(regs))
28081 goto exit;
28082
28083 tsk->thread.error_code = error_code;
28084 tsk->thread.trap_nr = X86_TRAP_GP;
28085 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
28086- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
28087+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
28088+
28089+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28090+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
28091+ die("PAX: suspicious general protection fault", regs, error_code);
28092+ else
28093+#endif
28094+
28095 die("general protection fault", regs, error_code);
28096+ }
28097 goto exit;
28098 }
28099
28100+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28101+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28102+ struct mm_struct *mm = tsk->mm;
28103+ unsigned long limit;
28104+
28105+ down_write(&mm->mmap_sem);
28106+ limit = mm->context.user_cs_limit;
28107+ if (limit < TASK_SIZE) {
28108+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28109+ up_write(&mm->mmap_sem);
28110+ return;
28111+ }
28112+ up_write(&mm->mmap_sem);
28113+ }
28114+#endif
28115+
28116 tsk->thread.error_code = error_code;
28117 tsk->thread.trap_nr = X86_TRAP_GP;
28118
28119@@ -581,13 +622,16 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
28120 container_of(task_pt_regs(current),
28121 struct bad_iret_stack, regs);
28122
28123+ if ((current->thread.sp0 ^ (unsigned long)s) < THREAD_SIZE)
28124+ new_stack = s;
28125+
28126 /* Copy the IRET target to the new stack. */
28127 memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
28128
28129 /* Copy the remainder of the stack from the current stack. */
28130 memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
28131
28132- BUG_ON(!user_mode_vm(&new_stack->regs));
28133+ BUG_ON(!user_mode(&new_stack->regs));
28134 return new_stack;
28135 }
28136 NOKPROBE_SYMBOL(fixup_bad_iret);
28137@@ -637,7 +681,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28138 * then it's very likely the result of an icebp/int01 trap.
28139 * User wants a sigtrap for that.
28140 */
28141- if (!dr6 && user_mode_vm(regs))
28142+ if (!dr6 && user_mode(regs))
28143 user_icebp = 1;
28144
28145 /* Catch kmemcheck conditions first of all! */
28146@@ -673,7 +717,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28147 /* It's safe to allow irq's after DR6 has been saved */
28148 preempt_conditional_sti(regs);
28149
28150- if (regs->flags & X86_VM_MASK) {
28151+ if (v8086_mode(regs)) {
28152 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28153 X86_TRAP_DB);
28154 preempt_conditional_cli(regs);
28155@@ -688,7 +732,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28156 * We already checked v86 mode above, so we can check for kernel mode
28157 * by just checking the CPL of CS.
28158 */
28159- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28160+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28161 tsk->thread.debugreg6 &= ~DR_STEP;
28162 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28163 regs->flags &= ~X86_EFLAGS_TF;
28164@@ -721,7 +765,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28165 return;
28166 conditional_sti(regs);
28167
28168- if (!user_mode_vm(regs))
28169+ if (!user_mode(regs))
28170 {
28171 if (!fixup_exception(regs)) {
28172 task->thread.error_code = error_code;
28173diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28174index 5054497..139f8f8 100644
28175--- a/arch/x86/kernel/tsc.c
28176+++ b/arch/x86/kernel/tsc.c
28177@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28178 */
28179 smp_wmb();
28180
28181- ACCESS_ONCE(c2n->head) = data;
28182+ ACCESS_ONCE_RW(c2n->head) = data;
28183 }
28184
28185 /*
28186diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28187index 81f8adb0..fff670e 100644
28188--- a/arch/x86/kernel/uprobes.c
28189+++ b/arch/x86/kernel/uprobes.c
28190@@ -912,7 +912,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28191 int ret = NOTIFY_DONE;
28192
28193 /* We are only interested in userspace traps */
28194- if (regs && !user_mode_vm(regs))
28195+ if (regs && !user_mode(regs))
28196 return NOTIFY_DONE;
28197
28198 switch (val) {
28199@@ -986,7 +986,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28200
28201 if (nleft != rasize) {
28202 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28203- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28204+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28205
28206 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28207 }
28208diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28209index b9242ba..50c5edd 100644
28210--- a/arch/x86/kernel/verify_cpu.S
28211+++ b/arch/x86/kernel/verify_cpu.S
28212@@ -20,6 +20,7 @@
28213 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28214 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28215 * arch/x86/kernel/head_32.S: processor startup
28216+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28217 *
28218 * verify_cpu, returns the status of longmode and SSE in register %eax.
28219 * 0: Success 1: Failure
28220diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28221index e8edcf5..27f9344 100644
28222--- a/arch/x86/kernel/vm86_32.c
28223+++ b/arch/x86/kernel/vm86_32.c
28224@@ -44,6 +44,7 @@
28225 #include <linux/ptrace.h>
28226 #include <linux/audit.h>
28227 #include <linux/stddef.h>
28228+#include <linux/grsecurity.h>
28229
28230 #include <asm/uaccess.h>
28231 #include <asm/io.h>
28232@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28233 do_exit(SIGSEGV);
28234 }
28235
28236- tss = &per_cpu(init_tss, get_cpu());
28237+ tss = init_tss + get_cpu();
28238 current->thread.sp0 = current->thread.saved_sp0;
28239 current->thread.sysenter_cs = __KERNEL_CS;
28240 load_sp0(tss, &current->thread);
28241@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28242
28243 if (tsk->thread.saved_sp0)
28244 return -EPERM;
28245+
28246+#ifdef CONFIG_GRKERNSEC_VM86
28247+ if (!capable(CAP_SYS_RAWIO)) {
28248+ gr_handle_vm86();
28249+ return -EPERM;
28250+ }
28251+#endif
28252+
28253 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28254 offsetof(struct kernel_vm86_struct, vm86plus) -
28255 sizeof(info.regs));
28256@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28257 int tmp;
28258 struct vm86plus_struct __user *v86;
28259
28260+#ifdef CONFIG_GRKERNSEC_VM86
28261+ if (!capable(CAP_SYS_RAWIO)) {
28262+ gr_handle_vm86();
28263+ return -EPERM;
28264+ }
28265+#endif
28266+
28267 tsk = current;
28268 switch (cmd) {
28269 case VM86_REQUEST_IRQ:
28270@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28271 tsk->thread.saved_fs = info->regs32->fs;
28272 tsk->thread.saved_gs = get_user_gs(info->regs32);
28273
28274- tss = &per_cpu(init_tss, get_cpu());
28275+ tss = init_tss + get_cpu();
28276 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28277 if (cpu_has_sep)
28278 tsk->thread.sysenter_cs = 0;
28279@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28280 goto cannot_handle;
28281 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28282 goto cannot_handle;
28283- intr_ptr = (unsigned long __user *) (i << 2);
28284+ intr_ptr = (__force unsigned long __user *) (i << 2);
28285 if (get_user(segoffs, intr_ptr))
28286 goto cannot_handle;
28287 if ((segoffs >> 16) == BIOSSEG)
28288diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28289index 00bf300..129df8e 100644
28290--- a/arch/x86/kernel/vmlinux.lds.S
28291+++ b/arch/x86/kernel/vmlinux.lds.S
28292@@ -26,6 +26,13 @@
28293 #include <asm/page_types.h>
28294 #include <asm/cache.h>
28295 #include <asm/boot.h>
28296+#include <asm/segment.h>
28297+
28298+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28299+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28300+#else
28301+#define __KERNEL_TEXT_OFFSET 0
28302+#endif
28303
28304 #undef i386 /* in case the preprocessor is a 32bit one */
28305
28306@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28307
28308 PHDRS {
28309 text PT_LOAD FLAGS(5); /* R_E */
28310+#ifdef CONFIG_X86_32
28311+ module PT_LOAD FLAGS(5); /* R_E */
28312+#endif
28313+#ifdef CONFIG_XEN
28314+ rodata PT_LOAD FLAGS(5); /* R_E */
28315+#else
28316+ rodata PT_LOAD FLAGS(4); /* R__ */
28317+#endif
28318 data PT_LOAD FLAGS(6); /* RW_ */
28319-#ifdef CONFIG_X86_64
28320+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28321 #ifdef CONFIG_SMP
28322 percpu PT_LOAD FLAGS(6); /* RW_ */
28323 #endif
28324+ text.init PT_LOAD FLAGS(5); /* R_E */
28325+ text.exit PT_LOAD FLAGS(5); /* R_E */
28326 init PT_LOAD FLAGS(7); /* RWE */
28327-#endif
28328 note PT_NOTE FLAGS(0); /* ___ */
28329 }
28330
28331 SECTIONS
28332 {
28333 #ifdef CONFIG_X86_32
28334- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28335- phys_startup_32 = startup_32 - LOAD_OFFSET;
28336+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28337 #else
28338- . = __START_KERNEL;
28339- phys_startup_64 = startup_64 - LOAD_OFFSET;
28340+ . = __START_KERNEL;
28341 #endif
28342
28343 /* Text and read-only data */
28344- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28345- _text = .;
28346+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28347 /* bootstrapping code */
28348+#ifdef CONFIG_X86_32
28349+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28350+#else
28351+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28352+#endif
28353+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28354+ _text = .;
28355 HEAD_TEXT
28356 . = ALIGN(8);
28357 _stext = .;
28358@@ -104,13 +124,47 @@ SECTIONS
28359 IRQENTRY_TEXT
28360 *(.fixup)
28361 *(.gnu.warning)
28362- /* End of text section */
28363- _etext = .;
28364 } :text = 0x9090
28365
28366- NOTES :text :note
28367+ . += __KERNEL_TEXT_OFFSET;
28368
28369- EXCEPTION_TABLE(16) :text = 0x9090
28370+#ifdef CONFIG_X86_32
28371+ . = ALIGN(PAGE_SIZE);
28372+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28373+
28374+#ifdef CONFIG_PAX_KERNEXEC
28375+ MODULES_EXEC_VADDR = .;
28376+ BYTE(0)
28377+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28378+ . = ALIGN(HPAGE_SIZE) - 1;
28379+ MODULES_EXEC_END = .;
28380+#endif
28381+
28382+ } :module
28383+#endif
28384+
28385+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28386+ /* End of text section */
28387+ BYTE(0)
28388+ _etext = . - __KERNEL_TEXT_OFFSET;
28389+ }
28390+
28391+#ifdef CONFIG_X86_32
28392+ . = ALIGN(PAGE_SIZE);
28393+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28394+ . = ALIGN(PAGE_SIZE);
28395+ *(.empty_zero_page)
28396+ *(.initial_pg_fixmap)
28397+ *(.initial_pg_pmd)
28398+ *(.initial_page_table)
28399+ *(.swapper_pg_dir)
28400+ } :rodata
28401+#endif
28402+
28403+ . = ALIGN(PAGE_SIZE);
28404+ NOTES :rodata :note
28405+
28406+ EXCEPTION_TABLE(16) :rodata
28407
28408 #if defined(CONFIG_DEBUG_RODATA)
28409 /* .text should occupy whole number of pages */
28410@@ -122,16 +176,20 @@ SECTIONS
28411
28412 /* Data */
28413 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28414+
28415+#ifdef CONFIG_PAX_KERNEXEC
28416+ . = ALIGN(HPAGE_SIZE);
28417+#else
28418+ . = ALIGN(PAGE_SIZE);
28419+#endif
28420+
28421 /* Start of data section */
28422 _sdata = .;
28423
28424 /* init_task */
28425 INIT_TASK_DATA(THREAD_SIZE)
28426
28427-#ifdef CONFIG_X86_32
28428- /* 32 bit has nosave before _edata */
28429 NOSAVE_DATA
28430-#endif
28431
28432 PAGE_ALIGNED_DATA(PAGE_SIZE)
28433
28434@@ -174,12 +232,19 @@ SECTIONS
28435 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
28436
28437 /* Init code and data - will be freed after init */
28438- . = ALIGN(PAGE_SIZE);
28439 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
28440+ BYTE(0)
28441+
28442+#ifdef CONFIG_PAX_KERNEXEC
28443+ . = ALIGN(HPAGE_SIZE);
28444+#else
28445+ . = ALIGN(PAGE_SIZE);
28446+#endif
28447+
28448 __init_begin = .; /* paired with __init_end */
28449- }
28450+ } :init.begin
28451
28452-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
28453+#ifdef CONFIG_SMP
28454 /*
28455 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
28456 * output PHDR, so the next output section - .init.text - should
28457@@ -190,12 +255,27 @@ SECTIONS
28458 "per-CPU data too large - increase CONFIG_PHYSICAL_START")
28459 #endif
28460
28461- INIT_TEXT_SECTION(PAGE_SIZE)
28462-#ifdef CONFIG_X86_64
28463- :init
28464-#endif
28465+ . = ALIGN(PAGE_SIZE);
28466+ init_begin = .;
28467+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
28468+ VMLINUX_SYMBOL(_sinittext) = .;
28469+ INIT_TEXT
28470+ . = ALIGN(PAGE_SIZE);
28471+ } :text.init
28472
28473- INIT_DATA_SECTION(16)
28474+ /*
28475+ * .exit.text is discard at runtime, not link time, to deal with
28476+ * references from .altinstructions and .eh_frame
28477+ */
28478+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28479+ EXIT_TEXT
28480+ VMLINUX_SYMBOL(_einittext) = .;
28481+ . = ALIGN(16);
28482+ } :text.exit
28483+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
28484+
28485+ . = ALIGN(PAGE_SIZE);
28486+ INIT_DATA_SECTION(16) :init
28487
28488 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
28489 __x86_cpu_dev_start = .;
28490@@ -266,19 +346,12 @@ SECTIONS
28491 }
28492
28493 . = ALIGN(8);
28494- /*
28495- * .exit.text is discard at runtime, not link time, to deal with
28496- * references from .altinstructions and .eh_frame
28497- */
28498- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
28499- EXIT_TEXT
28500- }
28501
28502 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
28503 EXIT_DATA
28504 }
28505
28506-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
28507+#ifndef CONFIG_SMP
28508 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
28509 #endif
28510
28511@@ -297,16 +370,10 @@ SECTIONS
28512 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
28513 __smp_locks = .;
28514 *(.smp_locks)
28515- . = ALIGN(PAGE_SIZE);
28516 __smp_locks_end = .;
28517+ . = ALIGN(PAGE_SIZE);
28518 }
28519
28520-#ifdef CONFIG_X86_64
28521- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
28522- NOSAVE_DATA
28523- }
28524-#endif
28525-
28526 /* BSS */
28527 . = ALIGN(PAGE_SIZE);
28528 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
28529@@ -322,6 +389,7 @@ SECTIONS
28530 __brk_base = .;
28531 . += 64 * 1024; /* 64k alignment slop space */
28532 *(.brk_reservation) /* areas brk users have reserved */
28533+ . = ALIGN(HPAGE_SIZE);
28534 __brk_limit = .;
28535 }
28536
28537@@ -348,13 +416,12 @@ SECTIONS
28538 * for the boot processor.
28539 */
28540 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
28541-INIT_PER_CPU(gdt_page);
28542 INIT_PER_CPU(irq_stack_union);
28543
28544 /*
28545 * Build-time check on the image size:
28546 */
28547-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
28548+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
28549 "kernel image bigger than KERNEL_IMAGE_SIZE");
28550
28551 #ifdef CONFIG_SMP
28552diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
28553index 2dcc6ff..082dc7a 100644
28554--- a/arch/x86/kernel/vsyscall_64.c
28555+++ b/arch/x86/kernel/vsyscall_64.c
28556@@ -38,15 +38,13 @@
28557 #define CREATE_TRACE_POINTS
28558 #include "vsyscall_trace.h"
28559
28560-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
28561+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
28562
28563 static int __init vsyscall_setup(char *str)
28564 {
28565 if (str) {
28566 if (!strcmp("emulate", str))
28567 vsyscall_mode = EMULATE;
28568- else if (!strcmp("native", str))
28569- vsyscall_mode = NATIVE;
28570 else if (!strcmp("none", str))
28571 vsyscall_mode = NONE;
28572 else
28573@@ -264,8 +262,7 @@ do_ret:
28574 return true;
28575
28576 sigsegv:
28577- force_sig(SIGSEGV, current);
28578- return true;
28579+ do_group_exit(SIGKILL);
28580 }
28581
28582 /*
28583@@ -283,8 +280,8 @@ static struct vm_operations_struct gate_vma_ops = {
28584 static struct vm_area_struct gate_vma = {
28585 .vm_start = VSYSCALL_ADDR,
28586 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
28587- .vm_page_prot = PAGE_READONLY_EXEC,
28588- .vm_flags = VM_READ | VM_EXEC,
28589+ .vm_page_prot = PAGE_READONLY,
28590+ .vm_flags = VM_READ,
28591 .vm_ops = &gate_vma_ops,
28592 };
28593
28594@@ -325,10 +322,7 @@ void __init map_vsyscall(void)
28595 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
28596
28597 if (vsyscall_mode != NONE)
28598- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
28599- vsyscall_mode == NATIVE
28600- ? PAGE_KERNEL_VSYSCALL
28601- : PAGE_KERNEL_VVAR);
28602+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
28603
28604 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
28605 (unsigned long)VSYSCALL_ADDR);
28606diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
28607index 37d8fa4..66e319a 100644
28608--- a/arch/x86/kernel/x8664_ksyms_64.c
28609+++ b/arch/x86/kernel/x8664_ksyms_64.c
28610@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
28611 EXPORT_SYMBOL(copy_user_generic_unrolled);
28612 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
28613 EXPORT_SYMBOL(__copy_user_nocache);
28614-EXPORT_SYMBOL(_copy_from_user);
28615-EXPORT_SYMBOL(_copy_to_user);
28616
28617 EXPORT_SYMBOL(copy_page);
28618 EXPORT_SYMBOL(clear_page);
28619@@ -79,3 +77,7 @@ EXPORT_SYMBOL(___preempt_schedule);
28620 EXPORT_SYMBOL(___preempt_schedule_context);
28621 #endif
28622 #endif
28623+
28624+#ifdef CONFIG_PAX_PER_CPU_PGD
28625+EXPORT_SYMBOL(cpu_pgd);
28626+#endif
28627diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
28628index 234b072..b7ab191 100644
28629--- a/arch/x86/kernel/x86_init.c
28630+++ b/arch/x86/kernel/x86_init.c
28631@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
28632 static void default_nmi_init(void) { };
28633 static int default_i8042_detect(void) { return 1; };
28634
28635-struct x86_platform_ops x86_platform = {
28636+struct x86_platform_ops x86_platform __read_only = {
28637 .calibrate_tsc = native_calibrate_tsc,
28638 .get_wallclock = mach_get_cmos_time,
28639 .set_wallclock = mach_set_rtc_mmss,
28640@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
28641 EXPORT_SYMBOL_GPL(x86_platform);
28642
28643 #if defined(CONFIG_PCI_MSI)
28644-struct x86_msi_ops x86_msi = {
28645+struct x86_msi_ops x86_msi __read_only = {
28646 .setup_msi_irqs = native_setup_msi_irqs,
28647 .compose_msi_msg = native_compose_msi_msg,
28648 .teardown_msi_irq = native_teardown_msi_irq,
28649@@ -140,7 +140,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
28650 }
28651 #endif
28652
28653-struct x86_io_apic_ops x86_io_apic_ops = {
28654+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
28655 .init = native_io_apic_init_mappings,
28656 .read = native_io_apic_read,
28657 .write = native_io_apic_write,
28658diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
28659index cdc6cf9..e04f495 100644
28660--- a/arch/x86/kernel/xsave.c
28661+++ b/arch/x86/kernel/xsave.c
28662@@ -168,18 +168,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28663
28664 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
28665 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
28666- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28667+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28668
28669 if (!use_xsave())
28670 return err;
28671
28672- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
28673+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
28674
28675 /*
28676 * Read the xstate_bv which we copied (directly from the cpu or
28677 * from the state in task struct) to the user buffers.
28678 */
28679- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28680+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28681
28682 /*
28683 * For legacy compatible, we always set FP/SSE bits in the bit
28684@@ -194,7 +194,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28685 */
28686 xstate_bv |= XSTATE_FPSSE;
28687
28688- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28689+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28690
28691 return err;
28692 }
28693@@ -203,6 +203,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28694 {
28695 int err;
28696
28697+ buf = (struct xsave_struct __user *)____m(buf);
28698 if (use_xsave())
28699 err = xsave_user(buf);
28700 else if (use_fxsr())
28701@@ -313,6 +314,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28702 */
28703 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28704 {
28705+ buf = (void __user *)____m(buf);
28706 if (use_xsave()) {
28707 if ((unsigned long)buf % 64 || fx_only) {
28708 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
28709diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28710index 307f9ec..0d8aa91 100644
28711--- a/arch/x86/kvm/cpuid.c
28712+++ b/arch/x86/kvm/cpuid.c
28713@@ -186,15 +186,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28714 struct kvm_cpuid2 *cpuid,
28715 struct kvm_cpuid_entry2 __user *entries)
28716 {
28717- int r;
28718+ int r, i;
28719
28720 r = -E2BIG;
28721 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28722 goto out;
28723 r = -EFAULT;
28724- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28725- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28726+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28727 goto out;
28728+ for (i = 0; i < cpuid->nent; ++i) {
28729+ struct kvm_cpuid_entry2 cpuid_entry;
28730+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28731+ goto out;
28732+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
28733+ }
28734 vcpu->arch.cpuid_nent = cpuid->nent;
28735 kvm_apic_set_version(vcpu);
28736 kvm_x86_ops->cpuid_update(vcpu);
28737@@ -207,15 +212,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28738 struct kvm_cpuid2 *cpuid,
28739 struct kvm_cpuid_entry2 __user *entries)
28740 {
28741- int r;
28742+ int r, i;
28743
28744 r = -E2BIG;
28745 if (cpuid->nent < vcpu->arch.cpuid_nent)
28746 goto out;
28747 r = -EFAULT;
28748- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
28749- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28750+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28751 goto out;
28752+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
28753+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
28754+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
28755+ goto out;
28756+ }
28757 return 0;
28758
28759 out:
28760diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
28761index 106c015..2db7161 100644
28762--- a/arch/x86/kvm/emulate.c
28763+++ b/arch/x86/kvm/emulate.c
28764@@ -3572,7 +3572,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
28765 int cr = ctxt->modrm_reg;
28766 u64 efer = 0;
28767
28768- static u64 cr_reserved_bits[] = {
28769+ static const u64 cr_reserved_bits[] = {
28770 0xffffffff00000000ULL,
28771 0, 0, 0, /* CR3 checked later */
28772 CR4_RESERVED_BITS,
28773diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
28774index 8ee4aa7..40c3d4c 100644
28775--- a/arch/x86/kvm/lapic.c
28776+++ b/arch/x86/kvm/lapic.c
28777@@ -56,7 +56,7 @@
28778 #define APIC_BUS_CYCLE_NS 1
28779
28780 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
28781-#define apic_debug(fmt, arg...)
28782+#define apic_debug(fmt, arg...) do {} while (0)
28783
28784 #define APIC_LVT_NUM 6
28785 /* 14 is the version for Xeon and Pentium 8.4.8*/
28786diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
28787index 0bc6c65..ca4f92d 100644
28788--- a/arch/x86/kvm/lapic.h
28789+++ b/arch/x86/kvm/lapic.h
28790@@ -165,7 +165,7 @@ static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
28791
28792 static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
28793 {
28794- return vcpu->arch.apic->pending_events;
28795+ return kvm_vcpu_has_lapic(vcpu) && vcpu->arch.apic->pending_events;
28796 }
28797
28798 bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
28799diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
28800index 6e6d115..43fecbf 100644
28801--- a/arch/x86/kvm/paging_tmpl.h
28802+++ b/arch/x86/kvm/paging_tmpl.h
28803@@ -343,7 +343,7 @@ retry_walk:
28804 if (unlikely(kvm_is_error_hva(host_addr)))
28805 goto error;
28806
28807- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
28808+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
28809 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
28810 goto error;
28811 walker->ptep_user[walker->level - 1] = ptep_user;
28812diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
28813index 1b32e29..076a16d 100644
28814--- a/arch/x86/kvm/svm.c
28815+++ b/arch/x86/kvm/svm.c
28816@@ -3570,7 +3570,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
28817 int cpu = raw_smp_processor_id();
28818
28819 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
28820+
28821+ pax_open_kernel();
28822 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
28823+ pax_close_kernel();
28824+
28825 load_TR_desc();
28826 }
28827
28828@@ -3966,6 +3970,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
28829 #endif
28830 #endif
28831
28832+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28833+ __set_fs(current_thread_info()->addr_limit);
28834+#endif
28835+
28836 reload_tss(vcpu);
28837
28838 local_irq_disable();
28839diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
28840index 5318d64..ff5f7aa 100644
28841--- a/arch/x86/kvm/vmx.c
28842+++ b/arch/x86/kvm/vmx.c
28843@@ -1440,12 +1440,12 @@ static void vmcs_write64(unsigned long field, u64 value)
28844 #endif
28845 }
28846
28847-static void vmcs_clear_bits(unsigned long field, u32 mask)
28848+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
28849 {
28850 vmcs_writel(field, vmcs_readl(field) & ~mask);
28851 }
28852
28853-static void vmcs_set_bits(unsigned long field, u32 mask)
28854+static void vmcs_set_bits(unsigned long field, unsigned long mask)
28855 {
28856 vmcs_writel(field, vmcs_readl(field) | mask);
28857 }
28858@@ -1705,7 +1705,11 @@ static void reload_tss(void)
28859 struct desc_struct *descs;
28860
28861 descs = (void *)gdt->address;
28862+
28863+ pax_open_kernel();
28864 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
28865+ pax_close_kernel();
28866+
28867 load_TR_desc();
28868 }
28869
28870@@ -1941,6 +1945,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
28871 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
28872 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
28873
28874+#ifdef CONFIG_PAX_PER_CPU_PGD
28875+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28876+#endif
28877+
28878 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
28879 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
28880 vmx->loaded_vmcs->cpu = cpu;
28881@@ -2233,7 +2241,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
28882 * reads and returns guest's timestamp counter "register"
28883 * guest_tsc = host_tsc + tsc_offset -- 21.3
28884 */
28885-static u64 guest_read_tsc(void)
28886+static u64 __intentional_overflow(-1) guest_read_tsc(void)
28887 {
28888 u64 host_tsc, tsc_offset;
28889
28890@@ -4466,7 +4474,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28891 unsigned long cr4;
28892
28893 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
28894+
28895+#ifndef CONFIG_PAX_PER_CPU_PGD
28896 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28897+#endif
28898
28899 /* Save the most likely value for this task's CR4 in the VMCS. */
28900 cr4 = cr4_read_shadow();
28901@@ -4493,7 +4504,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28902 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
28903 vmx->host_idt_base = dt.address;
28904
28905- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
28906+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
28907
28908 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
28909 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
28910@@ -6104,11 +6115,17 @@ static __init int hardware_setup(void)
28911 * page upon invalidation. No need to do anything if not
28912 * using the APIC_ACCESS_ADDR VMCS field.
28913 */
28914- if (!flexpriority_enabled)
28915- kvm_x86_ops->set_apic_access_page_addr = NULL;
28916+ if (!flexpriority_enabled) {
28917+ pax_open_kernel();
28918+ *(void **)&kvm_x86_ops->set_apic_access_page_addr = NULL;
28919+ pax_close_kernel();
28920+ }
28921
28922- if (!cpu_has_vmx_tpr_shadow())
28923- kvm_x86_ops->update_cr8_intercept = NULL;
28924+ if (!cpu_has_vmx_tpr_shadow()) {
28925+ pax_open_kernel();
28926+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28927+ pax_close_kernel();
28928+ }
28929
28930 if (enable_ept && !cpu_has_vmx_ept_2m_page())
28931 kvm_disable_largepages();
28932@@ -6119,14 +6136,16 @@ static __init int hardware_setup(void)
28933 if (!cpu_has_vmx_apicv())
28934 enable_apicv = 0;
28935
28936+ pax_open_kernel();
28937 if (enable_apicv)
28938- kvm_x86_ops->update_cr8_intercept = NULL;
28939+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28940 else {
28941- kvm_x86_ops->hwapic_irr_update = NULL;
28942- kvm_x86_ops->hwapic_isr_update = NULL;
28943- kvm_x86_ops->deliver_posted_interrupt = NULL;
28944- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28945+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
28946+ *(void **)&kvm_x86_ops->hwapic_isr_update = NULL;
28947+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
28948+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28949 }
28950+ pax_close_kernel();
28951
28952 vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
28953 vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
28954@@ -6179,10 +6198,12 @@ static __init int hardware_setup(void)
28955 enable_pml = 0;
28956
28957 if (!enable_pml) {
28958- kvm_x86_ops->slot_enable_log_dirty = NULL;
28959- kvm_x86_ops->slot_disable_log_dirty = NULL;
28960- kvm_x86_ops->flush_log_dirty = NULL;
28961- kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
28962+ pax_open_kernel();
28963+ *(void **)&kvm_x86_ops->slot_enable_log_dirty = NULL;
28964+ *(void **)&kvm_x86_ops->slot_disable_log_dirty = NULL;
28965+ *(void **)&kvm_x86_ops->flush_log_dirty = NULL;
28966+ *(void **)&kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
28967+ pax_close_kernel();
28968 }
28969
28970 return alloc_kvm_area();
28971@@ -8227,6 +8248,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28972 "jmp 2f \n\t"
28973 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
28974 "2: "
28975+
28976+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28977+ "ljmp %[cs],$3f\n\t"
28978+ "3: "
28979+#endif
28980+
28981 /* Save guest registers, load host registers, keep flags */
28982 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
28983 "pop %0 \n\t"
28984@@ -8279,6 +8306,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28985 #endif
28986 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
28987 [wordsize]"i"(sizeof(ulong))
28988+
28989+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28990+ ,[cs]"i"(__KERNEL_CS)
28991+#endif
28992+
28993 : "cc", "memory"
28994 #ifdef CONFIG_X86_64
28995 , "rax", "rbx", "rdi", "rsi"
28996@@ -8292,7 +8324,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28997 if (debugctlmsr)
28998 update_debugctlmsr(debugctlmsr);
28999
29000-#ifndef CONFIG_X86_64
29001+#ifdef CONFIG_X86_32
29002 /*
29003 * The sysexit path does not restore ds/es, so we must set them to
29004 * a reasonable value ourselves.
29005@@ -8301,8 +8333,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
29006 * may be executed in interrupt context, which saves and restore segments
29007 * around it, nullifying its effect.
29008 */
29009- loadsegment(ds, __USER_DS);
29010- loadsegment(es, __USER_DS);
29011+ loadsegment(ds, __KERNEL_DS);
29012+ loadsegment(es, __KERNEL_DS);
29013+ loadsegment(ss, __KERNEL_DS);
29014+
29015+#ifdef CONFIG_PAX_KERNEXEC
29016+ loadsegment(fs, __KERNEL_PERCPU);
29017+#endif
29018+
29019+#ifdef CONFIG_PAX_MEMORY_UDEREF
29020+ __set_fs(current_thread_info()->addr_limit);
29021+#endif
29022+
29023 #endif
29024
29025 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
29026diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
29027index 8838057..8f42ce3 100644
29028--- a/arch/x86/kvm/x86.c
29029+++ b/arch/x86/kvm/x86.c
29030@@ -1895,8 +1895,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
29031 {
29032 struct kvm *kvm = vcpu->kvm;
29033 int lm = is_long_mode(vcpu);
29034- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
29035- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
29036+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
29037+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
29038 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
29039 : kvm->arch.xen_hvm_config.blob_size_32;
29040 u32 page_num = data & ~PAGE_MASK;
29041@@ -2833,6 +2833,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
29042 if (n < msr_list.nmsrs)
29043 goto out;
29044 r = -EFAULT;
29045+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
29046+ goto out;
29047 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
29048 num_msrs_to_save * sizeof(u32)))
29049 goto out;
29050@@ -5737,7 +5739,7 @@ static struct notifier_block pvclock_gtod_notifier = {
29051 };
29052 #endif
29053
29054-int kvm_arch_init(void *opaque)
29055+int kvm_arch_init(const void *opaque)
29056 {
29057 int r;
29058 struct kvm_x86_ops *ops = opaque;
29059diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
29060index ac4453d..1f43bf3 100644
29061--- a/arch/x86/lguest/boot.c
29062+++ b/arch/x86/lguest/boot.c
29063@@ -1340,9 +1340,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
29064 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
29065 * Launcher to reboot us.
29066 */
29067-static void lguest_restart(char *reason)
29068+static __noreturn void lguest_restart(char *reason)
29069 {
29070 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
29071+ BUG();
29072 }
29073
29074 /*G:050
29075diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
29076index 00933d5..3a64af9 100644
29077--- a/arch/x86/lib/atomic64_386_32.S
29078+++ b/arch/x86/lib/atomic64_386_32.S
29079@@ -48,6 +48,10 @@ BEGIN(read)
29080 movl (v), %eax
29081 movl 4(v), %edx
29082 RET_ENDP
29083+BEGIN(read_unchecked)
29084+ movl (v), %eax
29085+ movl 4(v), %edx
29086+RET_ENDP
29087 #undef v
29088
29089 #define v %esi
29090@@ -55,6 +59,10 @@ BEGIN(set)
29091 movl %ebx, (v)
29092 movl %ecx, 4(v)
29093 RET_ENDP
29094+BEGIN(set_unchecked)
29095+ movl %ebx, (v)
29096+ movl %ecx, 4(v)
29097+RET_ENDP
29098 #undef v
29099
29100 #define v %esi
29101@@ -70,6 +78,20 @@ RET_ENDP
29102 BEGIN(add)
29103 addl %eax, (v)
29104 adcl %edx, 4(v)
29105+
29106+#ifdef CONFIG_PAX_REFCOUNT
29107+ jno 0f
29108+ subl %eax, (v)
29109+ sbbl %edx, 4(v)
29110+ int $4
29111+0:
29112+ _ASM_EXTABLE(0b, 0b)
29113+#endif
29114+
29115+RET_ENDP
29116+BEGIN(add_unchecked)
29117+ addl %eax, (v)
29118+ adcl %edx, 4(v)
29119 RET_ENDP
29120 #undef v
29121
29122@@ -77,6 +99,24 @@ RET_ENDP
29123 BEGIN(add_return)
29124 addl (v), %eax
29125 adcl 4(v), %edx
29126+
29127+#ifdef CONFIG_PAX_REFCOUNT
29128+ into
29129+1234:
29130+ _ASM_EXTABLE(1234b, 2f)
29131+#endif
29132+
29133+ movl %eax, (v)
29134+ movl %edx, 4(v)
29135+
29136+#ifdef CONFIG_PAX_REFCOUNT
29137+2:
29138+#endif
29139+
29140+RET_ENDP
29141+BEGIN(add_return_unchecked)
29142+ addl (v), %eax
29143+ adcl 4(v), %edx
29144 movl %eax, (v)
29145 movl %edx, 4(v)
29146 RET_ENDP
29147@@ -86,6 +126,20 @@ RET_ENDP
29148 BEGIN(sub)
29149 subl %eax, (v)
29150 sbbl %edx, 4(v)
29151+
29152+#ifdef CONFIG_PAX_REFCOUNT
29153+ jno 0f
29154+ addl %eax, (v)
29155+ adcl %edx, 4(v)
29156+ int $4
29157+0:
29158+ _ASM_EXTABLE(0b, 0b)
29159+#endif
29160+
29161+RET_ENDP
29162+BEGIN(sub_unchecked)
29163+ subl %eax, (v)
29164+ sbbl %edx, 4(v)
29165 RET_ENDP
29166 #undef v
29167
29168@@ -96,6 +150,27 @@ BEGIN(sub_return)
29169 sbbl $0, %edx
29170 addl (v), %eax
29171 adcl 4(v), %edx
29172+
29173+#ifdef CONFIG_PAX_REFCOUNT
29174+ into
29175+1234:
29176+ _ASM_EXTABLE(1234b, 2f)
29177+#endif
29178+
29179+ movl %eax, (v)
29180+ movl %edx, 4(v)
29181+
29182+#ifdef CONFIG_PAX_REFCOUNT
29183+2:
29184+#endif
29185+
29186+RET_ENDP
29187+BEGIN(sub_return_unchecked)
29188+ negl %edx
29189+ negl %eax
29190+ sbbl $0, %edx
29191+ addl (v), %eax
29192+ adcl 4(v), %edx
29193 movl %eax, (v)
29194 movl %edx, 4(v)
29195 RET_ENDP
29196@@ -105,6 +180,20 @@ RET_ENDP
29197 BEGIN(inc)
29198 addl $1, (v)
29199 adcl $0, 4(v)
29200+
29201+#ifdef CONFIG_PAX_REFCOUNT
29202+ jno 0f
29203+ subl $1, (v)
29204+ sbbl $0, 4(v)
29205+ int $4
29206+0:
29207+ _ASM_EXTABLE(0b, 0b)
29208+#endif
29209+
29210+RET_ENDP
29211+BEGIN(inc_unchecked)
29212+ addl $1, (v)
29213+ adcl $0, 4(v)
29214 RET_ENDP
29215 #undef v
29216
29217@@ -114,6 +203,26 @@ BEGIN(inc_return)
29218 movl 4(v), %edx
29219 addl $1, %eax
29220 adcl $0, %edx
29221+
29222+#ifdef CONFIG_PAX_REFCOUNT
29223+ into
29224+1234:
29225+ _ASM_EXTABLE(1234b, 2f)
29226+#endif
29227+
29228+ movl %eax, (v)
29229+ movl %edx, 4(v)
29230+
29231+#ifdef CONFIG_PAX_REFCOUNT
29232+2:
29233+#endif
29234+
29235+RET_ENDP
29236+BEGIN(inc_return_unchecked)
29237+ movl (v), %eax
29238+ movl 4(v), %edx
29239+ addl $1, %eax
29240+ adcl $0, %edx
29241 movl %eax, (v)
29242 movl %edx, 4(v)
29243 RET_ENDP
29244@@ -123,6 +232,20 @@ RET_ENDP
29245 BEGIN(dec)
29246 subl $1, (v)
29247 sbbl $0, 4(v)
29248+
29249+#ifdef CONFIG_PAX_REFCOUNT
29250+ jno 0f
29251+ addl $1, (v)
29252+ adcl $0, 4(v)
29253+ int $4
29254+0:
29255+ _ASM_EXTABLE(0b, 0b)
29256+#endif
29257+
29258+RET_ENDP
29259+BEGIN(dec_unchecked)
29260+ subl $1, (v)
29261+ sbbl $0, 4(v)
29262 RET_ENDP
29263 #undef v
29264
29265@@ -132,6 +255,26 @@ BEGIN(dec_return)
29266 movl 4(v), %edx
29267 subl $1, %eax
29268 sbbl $0, %edx
29269+
29270+#ifdef CONFIG_PAX_REFCOUNT
29271+ into
29272+1234:
29273+ _ASM_EXTABLE(1234b, 2f)
29274+#endif
29275+
29276+ movl %eax, (v)
29277+ movl %edx, 4(v)
29278+
29279+#ifdef CONFIG_PAX_REFCOUNT
29280+2:
29281+#endif
29282+
29283+RET_ENDP
29284+BEGIN(dec_return_unchecked)
29285+ movl (v), %eax
29286+ movl 4(v), %edx
29287+ subl $1, %eax
29288+ sbbl $0, %edx
29289 movl %eax, (v)
29290 movl %edx, 4(v)
29291 RET_ENDP
29292@@ -143,6 +286,13 @@ BEGIN(add_unless)
29293 adcl %edx, %edi
29294 addl (v), %eax
29295 adcl 4(v), %edx
29296+
29297+#ifdef CONFIG_PAX_REFCOUNT
29298+ into
29299+1234:
29300+ _ASM_EXTABLE(1234b, 2f)
29301+#endif
29302+
29303 cmpl %eax, %ecx
29304 je 3f
29305 1:
29306@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29307 1:
29308 addl $1, %eax
29309 adcl $0, %edx
29310+
29311+#ifdef CONFIG_PAX_REFCOUNT
29312+ into
29313+1234:
29314+ _ASM_EXTABLE(1234b, 2f)
29315+#endif
29316+
29317 movl %eax, (v)
29318 movl %edx, 4(v)
29319 movl $1, %eax
29320@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29321 movl 4(v), %edx
29322 subl $1, %eax
29323 sbbl $0, %edx
29324+
29325+#ifdef CONFIG_PAX_REFCOUNT
29326+ into
29327+1234:
29328+ _ASM_EXTABLE(1234b, 1f)
29329+#endif
29330+
29331 js 1f
29332 movl %eax, (v)
29333 movl %edx, 4(v)
29334diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29335index f5cc9eb..51fa319 100644
29336--- a/arch/x86/lib/atomic64_cx8_32.S
29337+++ b/arch/x86/lib/atomic64_cx8_32.S
29338@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
29339 CFI_STARTPROC
29340
29341 read64 %ecx
29342+ pax_force_retaddr
29343 ret
29344 CFI_ENDPROC
29345 ENDPROC(atomic64_read_cx8)
29346
29347+ENTRY(atomic64_read_unchecked_cx8)
29348+ CFI_STARTPROC
29349+
29350+ read64 %ecx
29351+ pax_force_retaddr
29352+ ret
29353+ CFI_ENDPROC
29354+ENDPROC(atomic64_read_unchecked_cx8)
29355+
29356 ENTRY(atomic64_set_cx8)
29357 CFI_STARTPROC
29358
29359@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
29360 cmpxchg8b (%esi)
29361 jne 1b
29362
29363+ pax_force_retaddr
29364 ret
29365 CFI_ENDPROC
29366 ENDPROC(atomic64_set_cx8)
29367
29368+ENTRY(atomic64_set_unchecked_cx8)
29369+ CFI_STARTPROC
29370+
29371+1:
29372+/* we don't need LOCK_PREFIX since aligned 64-bit writes
29373+ * are atomic on 586 and newer */
29374+ cmpxchg8b (%esi)
29375+ jne 1b
29376+
29377+ pax_force_retaddr
29378+ ret
29379+ CFI_ENDPROC
29380+ENDPROC(atomic64_set_unchecked_cx8)
29381+
29382 ENTRY(atomic64_xchg_cx8)
29383 CFI_STARTPROC
29384
29385@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
29386 cmpxchg8b (%esi)
29387 jne 1b
29388
29389+ pax_force_retaddr
29390 ret
29391 CFI_ENDPROC
29392 ENDPROC(atomic64_xchg_cx8)
29393
29394-.macro addsub_return func ins insc
29395-ENTRY(atomic64_\func\()_return_cx8)
29396+.macro addsub_return func ins insc unchecked=""
29397+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29398 CFI_STARTPROC
29399 SAVE ebp
29400 SAVE ebx
29401@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29402 movl %edx, %ecx
29403 \ins\()l %esi, %ebx
29404 \insc\()l %edi, %ecx
29405+
29406+.ifb \unchecked
29407+#ifdef CONFIG_PAX_REFCOUNT
29408+ into
29409+2:
29410+ _ASM_EXTABLE(2b, 3f)
29411+#endif
29412+.endif
29413+
29414 LOCK_PREFIX
29415 cmpxchg8b (%ebp)
29416 jne 1b
29417-
29418-10:
29419 movl %ebx, %eax
29420 movl %ecx, %edx
29421+
29422+.ifb \unchecked
29423+#ifdef CONFIG_PAX_REFCOUNT
29424+3:
29425+#endif
29426+.endif
29427+
29428 RESTORE edi
29429 RESTORE esi
29430 RESTORE ebx
29431 RESTORE ebp
29432+ pax_force_retaddr
29433 ret
29434 CFI_ENDPROC
29435-ENDPROC(atomic64_\func\()_return_cx8)
29436+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29437 .endm
29438
29439 addsub_return add add adc
29440 addsub_return sub sub sbb
29441+addsub_return add add adc _unchecked
29442+addsub_return sub sub sbb _unchecked
29443
29444-.macro incdec_return func ins insc
29445-ENTRY(atomic64_\func\()_return_cx8)
29446+.macro incdec_return func ins insc unchecked=""
29447+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29448 CFI_STARTPROC
29449 SAVE ebx
29450
29451@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
29452 movl %edx, %ecx
29453 \ins\()l $1, %ebx
29454 \insc\()l $0, %ecx
29455+
29456+.ifb \unchecked
29457+#ifdef CONFIG_PAX_REFCOUNT
29458+ into
29459+2:
29460+ _ASM_EXTABLE(2b, 3f)
29461+#endif
29462+.endif
29463+
29464 LOCK_PREFIX
29465 cmpxchg8b (%esi)
29466 jne 1b
29467
29468-10:
29469 movl %ebx, %eax
29470 movl %ecx, %edx
29471+
29472+.ifb \unchecked
29473+#ifdef CONFIG_PAX_REFCOUNT
29474+3:
29475+#endif
29476+.endif
29477+
29478 RESTORE ebx
29479+ pax_force_retaddr
29480 ret
29481 CFI_ENDPROC
29482-ENDPROC(atomic64_\func\()_return_cx8)
29483+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29484 .endm
29485
29486 incdec_return inc add adc
29487 incdec_return dec sub sbb
29488+incdec_return inc add adc _unchecked
29489+incdec_return dec sub sbb _unchecked
29490
29491 ENTRY(atomic64_dec_if_positive_cx8)
29492 CFI_STARTPROC
29493@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29494 movl %edx, %ecx
29495 subl $1, %ebx
29496 sbb $0, %ecx
29497+
29498+#ifdef CONFIG_PAX_REFCOUNT
29499+ into
29500+1234:
29501+ _ASM_EXTABLE(1234b, 2f)
29502+#endif
29503+
29504 js 2f
29505 LOCK_PREFIX
29506 cmpxchg8b (%esi)
29507@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
29508 movl %ebx, %eax
29509 movl %ecx, %edx
29510 RESTORE ebx
29511+ pax_force_retaddr
29512 ret
29513 CFI_ENDPROC
29514 ENDPROC(atomic64_dec_if_positive_cx8)
29515@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
29516 movl %edx, %ecx
29517 addl %ebp, %ebx
29518 adcl %edi, %ecx
29519+
29520+#ifdef CONFIG_PAX_REFCOUNT
29521+ into
29522+1234:
29523+ _ASM_EXTABLE(1234b, 3f)
29524+#endif
29525+
29526 LOCK_PREFIX
29527 cmpxchg8b (%esi)
29528 jne 1b
29529@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
29530 CFI_ADJUST_CFA_OFFSET -8
29531 RESTORE ebx
29532 RESTORE ebp
29533+ pax_force_retaddr
29534 ret
29535 4:
29536 cmpl %edx, 4(%esp)
29537@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
29538 xorl %ecx, %ecx
29539 addl $1, %ebx
29540 adcl %edx, %ecx
29541+
29542+#ifdef CONFIG_PAX_REFCOUNT
29543+ into
29544+1234:
29545+ _ASM_EXTABLE(1234b, 3f)
29546+#endif
29547+
29548 LOCK_PREFIX
29549 cmpxchg8b (%esi)
29550 jne 1b
29551@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
29552 movl $1, %eax
29553 3:
29554 RESTORE ebx
29555+ pax_force_retaddr
29556 ret
29557 CFI_ENDPROC
29558 ENDPROC(atomic64_inc_not_zero_cx8)
29559diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
29560index e78b8eee..7e173a8 100644
29561--- a/arch/x86/lib/checksum_32.S
29562+++ b/arch/x86/lib/checksum_32.S
29563@@ -29,7 +29,8 @@
29564 #include <asm/dwarf2.h>
29565 #include <asm/errno.h>
29566 #include <asm/asm.h>
29567-
29568+#include <asm/segment.h>
29569+
29570 /*
29571 * computes a partial checksum, e.g. for TCP/UDP fragments
29572 */
29573@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
29574
29575 #define ARGBASE 16
29576 #define FP 12
29577-
29578-ENTRY(csum_partial_copy_generic)
29579+
29580+ENTRY(csum_partial_copy_generic_to_user)
29581 CFI_STARTPROC
29582+
29583+#ifdef CONFIG_PAX_MEMORY_UDEREF
29584+ pushl_cfi %gs
29585+ popl_cfi %es
29586+ jmp csum_partial_copy_generic
29587+#endif
29588+
29589+ENTRY(csum_partial_copy_generic_from_user)
29590+
29591+#ifdef CONFIG_PAX_MEMORY_UDEREF
29592+ pushl_cfi %gs
29593+ popl_cfi %ds
29594+#endif
29595+
29596+ENTRY(csum_partial_copy_generic)
29597 subl $4,%esp
29598 CFI_ADJUST_CFA_OFFSET 4
29599 pushl_cfi %edi
29600@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
29601 jmp 4f
29602 SRC(1: movw (%esi), %bx )
29603 addl $2, %esi
29604-DST( movw %bx, (%edi) )
29605+DST( movw %bx, %es:(%edi) )
29606 addl $2, %edi
29607 addw %bx, %ax
29608 adcl $0, %eax
29609@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
29610 SRC(1: movl (%esi), %ebx )
29611 SRC( movl 4(%esi), %edx )
29612 adcl %ebx, %eax
29613-DST( movl %ebx, (%edi) )
29614+DST( movl %ebx, %es:(%edi) )
29615 adcl %edx, %eax
29616-DST( movl %edx, 4(%edi) )
29617+DST( movl %edx, %es:4(%edi) )
29618
29619 SRC( movl 8(%esi), %ebx )
29620 SRC( movl 12(%esi), %edx )
29621 adcl %ebx, %eax
29622-DST( movl %ebx, 8(%edi) )
29623+DST( movl %ebx, %es:8(%edi) )
29624 adcl %edx, %eax
29625-DST( movl %edx, 12(%edi) )
29626+DST( movl %edx, %es:12(%edi) )
29627
29628 SRC( movl 16(%esi), %ebx )
29629 SRC( movl 20(%esi), %edx )
29630 adcl %ebx, %eax
29631-DST( movl %ebx, 16(%edi) )
29632+DST( movl %ebx, %es:16(%edi) )
29633 adcl %edx, %eax
29634-DST( movl %edx, 20(%edi) )
29635+DST( movl %edx, %es:20(%edi) )
29636
29637 SRC( movl 24(%esi), %ebx )
29638 SRC( movl 28(%esi), %edx )
29639 adcl %ebx, %eax
29640-DST( movl %ebx, 24(%edi) )
29641+DST( movl %ebx, %es:24(%edi) )
29642 adcl %edx, %eax
29643-DST( movl %edx, 28(%edi) )
29644+DST( movl %edx, %es:28(%edi) )
29645
29646 lea 32(%esi), %esi
29647 lea 32(%edi), %edi
29648@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
29649 shrl $2, %edx # This clears CF
29650 SRC(3: movl (%esi), %ebx )
29651 adcl %ebx, %eax
29652-DST( movl %ebx, (%edi) )
29653+DST( movl %ebx, %es:(%edi) )
29654 lea 4(%esi), %esi
29655 lea 4(%edi), %edi
29656 dec %edx
29657@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
29658 jb 5f
29659 SRC( movw (%esi), %cx )
29660 leal 2(%esi), %esi
29661-DST( movw %cx, (%edi) )
29662+DST( movw %cx, %es:(%edi) )
29663 leal 2(%edi), %edi
29664 je 6f
29665 shll $16,%ecx
29666 SRC(5: movb (%esi), %cl )
29667-DST( movb %cl, (%edi) )
29668+DST( movb %cl, %es:(%edi) )
29669 6: addl %ecx, %eax
29670 adcl $0, %eax
29671 7:
29672@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
29673
29674 6001:
29675 movl ARGBASE+20(%esp), %ebx # src_err_ptr
29676- movl $-EFAULT, (%ebx)
29677+ movl $-EFAULT, %ss:(%ebx)
29678
29679 # zero the complete destination - computing the rest
29680 # is too much work
29681@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
29682
29683 6002:
29684 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29685- movl $-EFAULT,(%ebx)
29686+ movl $-EFAULT,%ss:(%ebx)
29687 jmp 5000b
29688
29689 .previous
29690
29691+ pushl_cfi %ss
29692+ popl_cfi %ds
29693+ pushl_cfi %ss
29694+ popl_cfi %es
29695 popl_cfi %ebx
29696 CFI_RESTORE ebx
29697 popl_cfi %esi
29698@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
29699 popl_cfi %ecx # equivalent to addl $4,%esp
29700 ret
29701 CFI_ENDPROC
29702-ENDPROC(csum_partial_copy_generic)
29703+ENDPROC(csum_partial_copy_generic_to_user)
29704
29705 #else
29706
29707 /* Version for PentiumII/PPro */
29708
29709 #define ROUND1(x) \
29710+ nop; nop; nop; \
29711 SRC(movl x(%esi), %ebx ) ; \
29712 addl %ebx, %eax ; \
29713- DST(movl %ebx, x(%edi) ) ;
29714+ DST(movl %ebx, %es:x(%edi)) ;
29715
29716 #define ROUND(x) \
29717+ nop; nop; nop; \
29718 SRC(movl x(%esi), %ebx ) ; \
29719 adcl %ebx, %eax ; \
29720- DST(movl %ebx, x(%edi) ) ;
29721+ DST(movl %ebx, %es:x(%edi)) ;
29722
29723 #define ARGBASE 12
29724-
29725-ENTRY(csum_partial_copy_generic)
29726+
29727+ENTRY(csum_partial_copy_generic_to_user)
29728 CFI_STARTPROC
29729+
29730+#ifdef CONFIG_PAX_MEMORY_UDEREF
29731+ pushl_cfi %gs
29732+ popl_cfi %es
29733+ jmp csum_partial_copy_generic
29734+#endif
29735+
29736+ENTRY(csum_partial_copy_generic_from_user)
29737+
29738+#ifdef CONFIG_PAX_MEMORY_UDEREF
29739+ pushl_cfi %gs
29740+ popl_cfi %ds
29741+#endif
29742+
29743+ENTRY(csum_partial_copy_generic)
29744 pushl_cfi %ebx
29745 CFI_REL_OFFSET ebx, 0
29746 pushl_cfi %edi
29747@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
29748 subl %ebx, %edi
29749 lea -1(%esi),%edx
29750 andl $-32,%edx
29751- lea 3f(%ebx,%ebx), %ebx
29752+ lea 3f(%ebx,%ebx,2), %ebx
29753 testl %esi, %esi
29754 jmp *%ebx
29755 1: addl $64,%esi
29756@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
29757 jb 5f
29758 SRC( movw (%esi), %dx )
29759 leal 2(%esi), %esi
29760-DST( movw %dx, (%edi) )
29761+DST( movw %dx, %es:(%edi) )
29762 leal 2(%edi), %edi
29763 je 6f
29764 shll $16,%edx
29765 5:
29766 SRC( movb (%esi), %dl )
29767-DST( movb %dl, (%edi) )
29768+DST( movb %dl, %es:(%edi) )
29769 6: addl %edx, %eax
29770 adcl $0, %eax
29771 7:
29772 .section .fixup, "ax"
29773 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
29774- movl $-EFAULT, (%ebx)
29775+ movl $-EFAULT, %ss:(%ebx)
29776 # zero the complete destination (computing the rest is too much work)
29777 movl ARGBASE+8(%esp),%edi # dst
29778 movl ARGBASE+12(%esp),%ecx # len
29779@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
29780 rep; stosb
29781 jmp 7b
29782 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29783- movl $-EFAULT, (%ebx)
29784+ movl $-EFAULT, %ss:(%ebx)
29785 jmp 7b
29786 .previous
29787
29788+#ifdef CONFIG_PAX_MEMORY_UDEREF
29789+ pushl_cfi %ss
29790+ popl_cfi %ds
29791+ pushl_cfi %ss
29792+ popl_cfi %es
29793+#endif
29794+
29795 popl_cfi %esi
29796 CFI_RESTORE esi
29797 popl_cfi %edi
29798@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
29799 CFI_RESTORE ebx
29800 ret
29801 CFI_ENDPROC
29802-ENDPROC(csum_partial_copy_generic)
29803+ENDPROC(csum_partial_copy_generic_to_user)
29804
29805 #undef ROUND
29806 #undef ROUND1
29807diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
29808index f2145cf..cea889d 100644
29809--- a/arch/x86/lib/clear_page_64.S
29810+++ b/arch/x86/lib/clear_page_64.S
29811@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
29812 movl $4096/8,%ecx
29813 xorl %eax,%eax
29814 rep stosq
29815+ pax_force_retaddr
29816 ret
29817 CFI_ENDPROC
29818 ENDPROC(clear_page_c)
29819@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
29820 movl $4096,%ecx
29821 xorl %eax,%eax
29822 rep stosb
29823+ pax_force_retaddr
29824 ret
29825 CFI_ENDPROC
29826 ENDPROC(clear_page_c_e)
29827@@ -43,6 +45,7 @@ ENTRY(clear_page)
29828 leaq 64(%rdi),%rdi
29829 jnz .Lloop
29830 nop
29831+ pax_force_retaddr
29832 ret
29833 CFI_ENDPROC
29834 .Lclear_page_end:
29835@@ -58,7 +61,7 @@ ENDPROC(clear_page)
29836
29837 #include <asm/cpufeature.h>
29838
29839- .section .altinstr_replacement,"ax"
29840+ .section .altinstr_replacement,"a"
29841 1: .byte 0xeb /* jmp <disp8> */
29842 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
29843 2: .byte 0xeb /* jmp <disp8> */
29844diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
29845index 40a1725..5d12ac4 100644
29846--- a/arch/x86/lib/cmpxchg16b_emu.S
29847+++ b/arch/x86/lib/cmpxchg16b_emu.S
29848@@ -8,6 +8,7 @@
29849 #include <linux/linkage.h>
29850 #include <asm/dwarf2.h>
29851 #include <asm/percpu.h>
29852+#include <asm/alternative-asm.h>
29853
29854 .text
29855
29856@@ -46,12 +47,14 @@ CFI_STARTPROC
29857 CFI_REMEMBER_STATE
29858 popfq_cfi
29859 mov $1, %al
29860+ pax_force_retaddr
29861 ret
29862
29863 CFI_RESTORE_STATE
29864 .Lnot_same:
29865 popfq_cfi
29866 xor %al,%al
29867+ pax_force_retaddr
29868 ret
29869
29870 CFI_ENDPROC
29871diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
29872index 176cca6..e0d658e 100644
29873--- a/arch/x86/lib/copy_page_64.S
29874+++ b/arch/x86/lib/copy_page_64.S
29875@@ -9,6 +9,7 @@ copy_page_rep:
29876 CFI_STARTPROC
29877 movl $4096/8, %ecx
29878 rep movsq
29879+ pax_force_retaddr
29880 ret
29881 CFI_ENDPROC
29882 ENDPROC(copy_page_rep)
29883@@ -24,8 +25,8 @@ ENTRY(copy_page)
29884 CFI_ADJUST_CFA_OFFSET 2*8
29885 movq %rbx, (%rsp)
29886 CFI_REL_OFFSET rbx, 0
29887- movq %r12, 1*8(%rsp)
29888- CFI_REL_OFFSET r12, 1*8
29889+ movq %r13, 1*8(%rsp)
29890+ CFI_REL_OFFSET r13, 1*8
29891
29892 movl $(4096/64)-5, %ecx
29893 .p2align 4
29894@@ -38,7 +39,7 @@ ENTRY(copy_page)
29895 movq 0x8*4(%rsi), %r9
29896 movq 0x8*5(%rsi), %r10
29897 movq 0x8*6(%rsi), %r11
29898- movq 0x8*7(%rsi), %r12
29899+ movq 0x8*7(%rsi), %r13
29900
29901 prefetcht0 5*64(%rsi)
29902
29903@@ -49,7 +50,7 @@ ENTRY(copy_page)
29904 movq %r9, 0x8*4(%rdi)
29905 movq %r10, 0x8*5(%rdi)
29906 movq %r11, 0x8*6(%rdi)
29907- movq %r12, 0x8*7(%rdi)
29908+ movq %r13, 0x8*7(%rdi)
29909
29910 leaq 64 (%rsi), %rsi
29911 leaq 64 (%rdi), %rdi
29912@@ -68,7 +69,7 @@ ENTRY(copy_page)
29913 movq 0x8*4(%rsi), %r9
29914 movq 0x8*5(%rsi), %r10
29915 movq 0x8*6(%rsi), %r11
29916- movq 0x8*7(%rsi), %r12
29917+ movq 0x8*7(%rsi), %r13
29918
29919 movq %rax, 0x8*0(%rdi)
29920 movq %rbx, 0x8*1(%rdi)
29921@@ -77,7 +78,7 @@ ENTRY(copy_page)
29922 movq %r9, 0x8*4(%rdi)
29923 movq %r10, 0x8*5(%rdi)
29924 movq %r11, 0x8*6(%rdi)
29925- movq %r12, 0x8*7(%rdi)
29926+ movq %r13, 0x8*7(%rdi)
29927
29928 leaq 64(%rdi), %rdi
29929 leaq 64(%rsi), %rsi
29930@@ -85,10 +86,11 @@ ENTRY(copy_page)
29931
29932 movq (%rsp), %rbx
29933 CFI_RESTORE rbx
29934- movq 1*8(%rsp), %r12
29935- CFI_RESTORE r12
29936+ movq 1*8(%rsp), %r13
29937+ CFI_RESTORE r13
29938 addq $2*8, %rsp
29939 CFI_ADJUST_CFA_OFFSET -2*8
29940+ pax_force_retaddr
29941 ret
29942 .Lcopy_page_end:
29943 CFI_ENDPROC
29944@@ -99,7 +101,7 @@ ENDPROC(copy_page)
29945
29946 #include <asm/cpufeature.h>
29947
29948- .section .altinstr_replacement,"ax"
29949+ .section .altinstr_replacement,"a"
29950 1: .byte 0xeb /* jmp <disp8> */
29951 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
29952 2:
29953diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
29954index dee945d..a84067b 100644
29955--- a/arch/x86/lib/copy_user_64.S
29956+++ b/arch/x86/lib/copy_user_64.S
29957@@ -18,31 +18,7 @@
29958 #include <asm/alternative-asm.h>
29959 #include <asm/asm.h>
29960 #include <asm/smap.h>
29961-
29962-/*
29963- * By placing feature2 after feature1 in altinstructions section, we logically
29964- * implement:
29965- * If CPU has feature2, jmp to alt2 is used
29966- * else if CPU has feature1, jmp to alt1 is used
29967- * else jmp to orig is used.
29968- */
29969- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
29970-0:
29971- .byte 0xe9 /* 32bit jump */
29972- .long \orig-1f /* by default jump to orig */
29973-1:
29974- .section .altinstr_replacement,"ax"
29975-2: .byte 0xe9 /* near jump with 32bit immediate */
29976- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
29977-3: .byte 0xe9 /* near jump with 32bit immediate */
29978- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
29979- .previous
29980-
29981- .section .altinstructions,"a"
29982- altinstruction_entry 0b,2b,\feature1,5,5
29983- altinstruction_entry 0b,3b,\feature2,5,5
29984- .previous
29985- .endm
29986+#include <asm/pgtable.h>
29987
29988 .macro ALIGN_DESTINATION
29989 #ifdef FIX_ALIGNMENT
29990@@ -70,52 +46,6 @@
29991 #endif
29992 .endm
29993
29994-/* Standard copy_to_user with segment limit checking */
29995-ENTRY(_copy_to_user)
29996- CFI_STARTPROC
29997- GET_THREAD_INFO(%rax)
29998- movq %rdi,%rcx
29999- addq %rdx,%rcx
30000- jc bad_to_user
30001- cmpq TI_addr_limit(%rax),%rcx
30002- ja bad_to_user
30003- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
30004- copy_user_generic_unrolled,copy_user_generic_string, \
30005- copy_user_enhanced_fast_string
30006- CFI_ENDPROC
30007-ENDPROC(_copy_to_user)
30008-
30009-/* Standard copy_from_user with segment limit checking */
30010-ENTRY(_copy_from_user)
30011- CFI_STARTPROC
30012- GET_THREAD_INFO(%rax)
30013- movq %rsi,%rcx
30014- addq %rdx,%rcx
30015- jc bad_from_user
30016- cmpq TI_addr_limit(%rax),%rcx
30017- ja bad_from_user
30018- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
30019- copy_user_generic_unrolled,copy_user_generic_string, \
30020- copy_user_enhanced_fast_string
30021- CFI_ENDPROC
30022-ENDPROC(_copy_from_user)
30023-
30024- .section .fixup,"ax"
30025- /* must zero dest */
30026-ENTRY(bad_from_user)
30027-bad_from_user:
30028- CFI_STARTPROC
30029- movl %edx,%ecx
30030- xorl %eax,%eax
30031- rep
30032- stosb
30033-bad_to_user:
30034- movl %edx,%eax
30035- ret
30036- CFI_ENDPROC
30037-ENDPROC(bad_from_user)
30038- .previous
30039-
30040 /*
30041 * copy_user_generic_unrolled - memory copy with exception handling.
30042 * This version is for CPUs like P4 that don't have efficient micro
30043@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
30044 */
30045 ENTRY(copy_user_generic_unrolled)
30046 CFI_STARTPROC
30047+ ASM_PAX_OPEN_USERLAND
30048 ASM_STAC
30049 cmpl $8,%edx
30050 jb 20f /* less then 8 bytes, go to byte copy loop */
30051@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
30052 jnz 21b
30053 23: xor %eax,%eax
30054 ASM_CLAC
30055+ ASM_PAX_CLOSE_USERLAND
30056+ pax_force_retaddr
30057 ret
30058
30059 .section .fixup,"ax"
30060@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
30061 */
30062 ENTRY(copy_user_generic_string)
30063 CFI_STARTPROC
30064+ ASM_PAX_OPEN_USERLAND
30065 ASM_STAC
30066 cmpl $8,%edx
30067 jb 2f /* less than 8 bytes, go to byte copy loop */
30068@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
30069 movsb
30070 xorl %eax,%eax
30071 ASM_CLAC
30072+ ASM_PAX_CLOSE_USERLAND
30073+ pax_force_retaddr
30074 ret
30075
30076 .section .fixup,"ax"
30077@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
30078 */
30079 ENTRY(copy_user_enhanced_fast_string)
30080 CFI_STARTPROC
30081+ ASM_PAX_OPEN_USERLAND
30082 ASM_STAC
30083 movl %edx,%ecx
30084 1: rep
30085 movsb
30086 xorl %eax,%eax
30087 ASM_CLAC
30088+ ASM_PAX_CLOSE_USERLAND
30089+ pax_force_retaddr
30090 ret
30091
30092 .section .fixup,"ax"
30093diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
30094index 6a4f43c..c70fb52 100644
30095--- a/arch/x86/lib/copy_user_nocache_64.S
30096+++ b/arch/x86/lib/copy_user_nocache_64.S
30097@@ -8,6 +8,7 @@
30098
30099 #include <linux/linkage.h>
30100 #include <asm/dwarf2.h>
30101+#include <asm/alternative-asm.h>
30102
30103 #define FIX_ALIGNMENT 1
30104
30105@@ -16,6 +17,7 @@
30106 #include <asm/thread_info.h>
30107 #include <asm/asm.h>
30108 #include <asm/smap.h>
30109+#include <asm/pgtable.h>
30110
30111 .macro ALIGN_DESTINATION
30112 #ifdef FIX_ALIGNMENT
30113@@ -49,6 +51,16 @@
30114 */
30115 ENTRY(__copy_user_nocache)
30116 CFI_STARTPROC
30117+
30118+#ifdef CONFIG_PAX_MEMORY_UDEREF
30119+ mov pax_user_shadow_base,%rcx
30120+ cmp %rcx,%rsi
30121+ jae 1f
30122+ add %rcx,%rsi
30123+1:
30124+#endif
30125+
30126+ ASM_PAX_OPEN_USERLAND
30127 ASM_STAC
30128 cmpl $8,%edx
30129 jb 20f /* less then 8 bytes, go to byte copy loop */
30130@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
30131 jnz 21b
30132 23: xorl %eax,%eax
30133 ASM_CLAC
30134+ ASM_PAX_CLOSE_USERLAND
30135 sfence
30136+ pax_force_retaddr
30137 ret
30138
30139 .section .fixup,"ax"
30140diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
30141index 2419d5f..fe52d0e 100644
30142--- a/arch/x86/lib/csum-copy_64.S
30143+++ b/arch/x86/lib/csum-copy_64.S
30144@@ -9,6 +9,7 @@
30145 #include <asm/dwarf2.h>
30146 #include <asm/errno.h>
30147 #include <asm/asm.h>
30148+#include <asm/alternative-asm.h>
30149
30150 /*
30151 * Checksum copy with exception handling.
30152@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
30153 CFI_ADJUST_CFA_OFFSET 7*8
30154 movq %rbx, 2*8(%rsp)
30155 CFI_REL_OFFSET rbx, 2*8
30156- movq %r12, 3*8(%rsp)
30157- CFI_REL_OFFSET r12, 3*8
30158+ movq %r15, 3*8(%rsp)
30159+ CFI_REL_OFFSET r15, 3*8
30160 movq %r14, 4*8(%rsp)
30161 CFI_REL_OFFSET r14, 4*8
30162 movq %r13, 5*8(%rsp)
30163@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
30164 movl %edx, %ecx
30165
30166 xorl %r9d, %r9d
30167- movq %rcx, %r12
30168+ movq %rcx, %r15
30169
30170- shrq $6, %r12
30171+ shrq $6, %r15
30172 jz .Lhandle_tail /* < 64 */
30173
30174 clc
30175
30176 /* main loop. clear in 64 byte blocks */
30177 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
30178- /* r11: temp3, rdx: temp4, r12 loopcnt */
30179+ /* r11: temp3, rdx: temp4, r15 loopcnt */
30180 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
30181 .p2align 4
30182 .Lloop:
30183@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
30184 adcq %r14, %rax
30185 adcq %r13, %rax
30186
30187- decl %r12d
30188+ decl %r15d
30189
30190 dest
30191 movq %rbx, (%rsi)
30192@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
30193 .Lende:
30194 movq 2*8(%rsp), %rbx
30195 CFI_RESTORE rbx
30196- movq 3*8(%rsp), %r12
30197- CFI_RESTORE r12
30198+ movq 3*8(%rsp), %r15
30199+ CFI_RESTORE r15
30200 movq 4*8(%rsp), %r14
30201 CFI_RESTORE r14
30202 movq 5*8(%rsp), %r13
30203@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30204 CFI_RESTORE rbp
30205 addq $7*8, %rsp
30206 CFI_ADJUST_CFA_OFFSET -7*8
30207+ pax_force_retaddr
30208 ret
30209 CFI_RESTORE_STATE
30210
30211diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30212index 1318f75..44c30fd 100644
30213--- a/arch/x86/lib/csum-wrappers_64.c
30214+++ b/arch/x86/lib/csum-wrappers_64.c
30215@@ -52,10 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30216 len -= 2;
30217 }
30218 }
30219+ pax_open_userland();
30220 stac();
30221- isum = csum_partial_copy_generic((__force const void *)src,
30222+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30223 dst, len, isum, errp, NULL);
30224 clac();
30225+ pax_close_userland();
30226 if (unlikely(*errp))
30227 goto out_err;
30228
30229@@ -109,10 +111,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30230 }
30231
30232 *errp = 0;
30233+ pax_open_userland();
30234 stac();
30235- ret = csum_partial_copy_generic(src, (void __force *)dst,
30236+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30237 len, isum, NULL, errp);
30238 clac();
30239+ pax_close_userland();
30240 return ret;
30241 }
30242 EXPORT_SYMBOL(csum_partial_copy_to_user);
30243diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30244index a451235..a74bfa3 100644
30245--- a/arch/x86/lib/getuser.S
30246+++ b/arch/x86/lib/getuser.S
30247@@ -33,17 +33,40 @@
30248 #include <asm/thread_info.h>
30249 #include <asm/asm.h>
30250 #include <asm/smap.h>
30251+#include <asm/segment.h>
30252+#include <asm/pgtable.h>
30253+#include <asm/alternative-asm.h>
30254+
30255+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30256+#define __copyuser_seg gs;
30257+#else
30258+#define __copyuser_seg
30259+#endif
30260
30261 .text
30262 ENTRY(__get_user_1)
30263 CFI_STARTPROC
30264+
30265+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30266 GET_THREAD_INFO(%_ASM_DX)
30267 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30268 jae bad_get_user
30269+
30270+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30271+ mov pax_user_shadow_base,%_ASM_DX
30272+ cmp %_ASM_DX,%_ASM_AX
30273+ jae 1234f
30274+ add %_ASM_DX,%_ASM_AX
30275+1234:
30276+#endif
30277+
30278+#endif
30279+
30280 ASM_STAC
30281-1: movzbl (%_ASM_AX),%edx
30282+1: __copyuser_seg movzbl (%_ASM_AX),%edx
30283 xor %eax,%eax
30284 ASM_CLAC
30285+ pax_force_retaddr
30286 ret
30287 CFI_ENDPROC
30288 ENDPROC(__get_user_1)
30289@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30290 ENTRY(__get_user_2)
30291 CFI_STARTPROC
30292 add $1,%_ASM_AX
30293+
30294+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30295 jc bad_get_user
30296 GET_THREAD_INFO(%_ASM_DX)
30297 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30298 jae bad_get_user
30299+
30300+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30301+ mov pax_user_shadow_base,%_ASM_DX
30302+ cmp %_ASM_DX,%_ASM_AX
30303+ jae 1234f
30304+ add %_ASM_DX,%_ASM_AX
30305+1234:
30306+#endif
30307+
30308+#endif
30309+
30310 ASM_STAC
30311-2: movzwl -1(%_ASM_AX),%edx
30312+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30313 xor %eax,%eax
30314 ASM_CLAC
30315+ pax_force_retaddr
30316 ret
30317 CFI_ENDPROC
30318 ENDPROC(__get_user_2)
30319@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30320 ENTRY(__get_user_4)
30321 CFI_STARTPROC
30322 add $3,%_ASM_AX
30323+
30324+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30325 jc bad_get_user
30326 GET_THREAD_INFO(%_ASM_DX)
30327 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30328 jae bad_get_user
30329+
30330+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30331+ mov pax_user_shadow_base,%_ASM_DX
30332+ cmp %_ASM_DX,%_ASM_AX
30333+ jae 1234f
30334+ add %_ASM_DX,%_ASM_AX
30335+1234:
30336+#endif
30337+
30338+#endif
30339+
30340 ASM_STAC
30341-3: movl -3(%_ASM_AX),%edx
30342+3: __copyuser_seg movl -3(%_ASM_AX),%edx
30343 xor %eax,%eax
30344 ASM_CLAC
30345+ pax_force_retaddr
30346 ret
30347 CFI_ENDPROC
30348 ENDPROC(__get_user_4)
30349@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30350 GET_THREAD_INFO(%_ASM_DX)
30351 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30352 jae bad_get_user
30353+
30354+#ifdef CONFIG_PAX_MEMORY_UDEREF
30355+ mov pax_user_shadow_base,%_ASM_DX
30356+ cmp %_ASM_DX,%_ASM_AX
30357+ jae 1234f
30358+ add %_ASM_DX,%_ASM_AX
30359+1234:
30360+#endif
30361+
30362 ASM_STAC
30363 4: movq -7(%_ASM_AX),%rdx
30364 xor %eax,%eax
30365 ASM_CLAC
30366+ pax_force_retaddr
30367 ret
30368 #else
30369 add $7,%_ASM_AX
30370@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30371 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30372 jae bad_get_user_8
30373 ASM_STAC
30374-4: movl -7(%_ASM_AX),%edx
30375-5: movl -3(%_ASM_AX),%ecx
30376+4: __copyuser_seg movl -7(%_ASM_AX),%edx
30377+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30378 xor %eax,%eax
30379 ASM_CLAC
30380+ pax_force_retaddr
30381 ret
30382 #endif
30383 CFI_ENDPROC
30384@@ -113,6 +175,7 @@ bad_get_user:
30385 xor %edx,%edx
30386 mov $(-EFAULT),%_ASM_AX
30387 ASM_CLAC
30388+ pax_force_retaddr
30389 ret
30390 CFI_ENDPROC
30391 END(bad_get_user)
30392@@ -124,6 +187,7 @@ bad_get_user_8:
30393 xor %ecx,%ecx
30394 mov $(-EFAULT),%_ASM_AX
30395 ASM_CLAC
30396+ pax_force_retaddr
30397 ret
30398 CFI_ENDPROC
30399 END(bad_get_user_8)
30400diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30401index 85994f5..9929d7f 100644
30402--- a/arch/x86/lib/insn.c
30403+++ b/arch/x86/lib/insn.c
30404@@ -20,8 +20,10 @@
30405
30406 #ifdef __KERNEL__
30407 #include <linux/string.h>
30408+#include <asm/pgtable_types.h>
30409 #else
30410 #include <string.h>
30411+#define ktla_ktva(addr) addr
30412 #endif
30413 #include <asm/inat.h>
30414 #include <asm/insn.h>
30415@@ -60,9 +62,9 @@ void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
30416 buf_len = MAX_INSN_SIZE;
30417
30418 memset(insn, 0, sizeof(*insn));
30419- insn->kaddr = kaddr;
30420- insn->end_kaddr = kaddr + buf_len;
30421- insn->next_byte = kaddr;
30422+ insn->kaddr = ktla_ktva(kaddr);
30423+ insn->end_kaddr = insn->kaddr + buf_len;
30424+ insn->next_byte = insn->kaddr;
30425 insn->x86_64 = x86_64 ? 1 : 0;
30426 insn->opnd_bytes = 4;
30427 if (x86_64)
30428diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30429index 05a95e7..326f2fa 100644
30430--- a/arch/x86/lib/iomap_copy_64.S
30431+++ b/arch/x86/lib/iomap_copy_64.S
30432@@ -17,6 +17,7 @@
30433
30434 #include <linux/linkage.h>
30435 #include <asm/dwarf2.h>
30436+#include <asm/alternative-asm.h>
30437
30438 /*
30439 * override generic version in lib/iomap_copy.c
30440@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30441 CFI_STARTPROC
30442 movl %edx,%ecx
30443 rep movsd
30444+ pax_force_retaddr
30445 ret
30446 CFI_ENDPROC
30447 ENDPROC(__iowrite32_copy)
30448diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30449index 89b53c9..97357ca 100644
30450--- a/arch/x86/lib/memcpy_64.S
30451+++ b/arch/x86/lib/memcpy_64.S
30452@@ -24,7 +24,7 @@
30453 * This gets patched over the unrolled variant (below) via the
30454 * alternative instructions framework:
30455 */
30456- .section .altinstr_replacement, "ax", @progbits
30457+ .section .altinstr_replacement, "a", @progbits
30458 .Lmemcpy_c:
30459 movq %rdi, %rax
30460 movq %rdx, %rcx
30461@@ -33,6 +33,7 @@
30462 rep movsq
30463 movl %edx, %ecx
30464 rep movsb
30465+ pax_force_retaddr
30466 ret
30467 .Lmemcpy_e:
30468 .previous
30469@@ -44,11 +45,12 @@
30470 * This gets patched over the unrolled variant (below) via the
30471 * alternative instructions framework:
30472 */
30473- .section .altinstr_replacement, "ax", @progbits
30474+ .section .altinstr_replacement, "a", @progbits
30475 .Lmemcpy_c_e:
30476 movq %rdi, %rax
30477 movq %rdx, %rcx
30478 rep movsb
30479+ pax_force_retaddr
30480 ret
30481 .Lmemcpy_e_e:
30482 .previous
30483@@ -138,6 +140,7 @@ ENTRY(memcpy)
30484 movq %r9, 1*8(%rdi)
30485 movq %r10, -2*8(%rdi, %rdx)
30486 movq %r11, -1*8(%rdi, %rdx)
30487+ pax_force_retaddr
30488 retq
30489 .p2align 4
30490 .Lless_16bytes:
30491@@ -150,6 +153,7 @@ ENTRY(memcpy)
30492 movq -1*8(%rsi, %rdx), %r9
30493 movq %r8, 0*8(%rdi)
30494 movq %r9, -1*8(%rdi, %rdx)
30495+ pax_force_retaddr
30496 retq
30497 .p2align 4
30498 .Lless_8bytes:
30499@@ -163,6 +167,7 @@ ENTRY(memcpy)
30500 movl -4(%rsi, %rdx), %r8d
30501 movl %ecx, (%rdi)
30502 movl %r8d, -4(%rdi, %rdx)
30503+ pax_force_retaddr
30504 retq
30505 .p2align 4
30506 .Lless_3bytes:
30507@@ -181,6 +186,7 @@ ENTRY(memcpy)
30508 movb %cl, (%rdi)
30509
30510 .Lend:
30511+ pax_force_retaddr
30512 retq
30513 CFI_ENDPROC
30514 ENDPROC(memcpy)
30515diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
30516index 9c4b530..830b77a 100644
30517--- a/arch/x86/lib/memmove_64.S
30518+++ b/arch/x86/lib/memmove_64.S
30519@@ -205,14 +205,16 @@ ENTRY(__memmove)
30520 movb (%rsi), %r11b
30521 movb %r11b, (%rdi)
30522 13:
30523+ pax_force_retaddr
30524 retq
30525 CFI_ENDPROC
30526
30527- .section .altinstr_replacement,"ax"
30528+ .section .altinstr_replacement,"a"
30529 .Lmemmove_begin_forward_efs:
30530 /* Forward moving data. */
30531 movq %rdx, %rcx
30532 rep movsb
30533+ pax_force_retaddr
30534 retq
30535 .Lmemmove_end_forward_efs:
30536 .previous
30537diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
30538index 6f44935..fbf5f6d 100644
30539--- a/arch/x86/lib/memset_64.S
30540+++ b/arch/x86/lib/memset_64.S
30541@@ -16,7 +16,7 @@
30542 *
30543 * rax original destination
30544 */
30545- .section .altinstr_replacement, "ax", @progbits
30546+ .section .altinstr_replacement, "a", @progbits
30547 .Lmemset_c:
30548 movq %rdi,%r9
30549 movq %rdx,%rcx
30550@@ -30,6 +30,7 @@
30551 movl %edx,%ecx
30552 rep stosb
30553 movq %r9,%rax
30554+ pax_force_retaddr
30555 ret
30556 .Lmemset_e:
30557 .previous
30558@@ -45,13 +46,14 @@
30559 *
30560 * rax original destination
30561 */
30562- .section .altinstr_replacement, "ax", @progbits
30563+ .section .altinstr_replacement, "a", @progbits
30564 .Lmemset_c_e:
30565 movq %rdi,%r9
30566 movb %sil,%al
30567 movq %rdx,%rcx
30568 rep stosb
30569 movq %r9,%rax
30570+ pax_force_retaddr
30571 ret
30572 .Lmemset_e_e:
30573 .previous
30574@@ -120,6 +122,7 @@ ENTRY(__memset)
30575
30576 .Lende:
30577 movq %r10,%rax
30578+ pax_force_retaddr
30579 ret
30580
30581 CFI_RESTORE_STATE
30582diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
30583index c9f2d9b..e7fd2c0 100644
30584--- a/arch/x86/lib/mmx_32.c
30585+++ b/arch/x86/lib/mmx_32.c
30586@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30587 {
30588 void *p;
30589 int i;
30590+ unsigned long cr0;
30591
30592 if (unlikely(in_interrupt()))
30593 return __memcpy(to, from, len);
30594@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30595 kernel_fpu_begin();
30596
30597 __asm__ __volatile__ (
30598- "1: prefetch (%0)\n" /* This set is 28 bytes */
30599- " prefetch 64(%0)\n"
30600- " prefetch 128(%0)\n"
30601- " prefetch 192(%0)\n"
30602- " prefetch 256(%0)\n"
30603+ "1: prefetch (%1)\n" /* This set is 28 bytes */
30604+ " prefetch 64(%1)\n"
30605+ " prefetch 128(%1)\n"
30606+ " prefetch 192(%1)\n"
30607+ " prefetch 256(%1)\n"
30608 "2: \n"
30609 ".section .fixup, \"ax\"\n"
30610- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30611+ "3: \n"
30612+
30613+#ifdef CONFIG_PAX_KERNEXEC
30614+ " movl %%cr0, %0\n"
30615+ " movl %0, %%eax\n"
30616+ " andl $0xFFFEFFFF, %%eax\n"
30617+ " movl %%eax, %%cr0\n"
30618+#endif
30619+
30620+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30621+
30622+#ifdef CONFIG_PAX_KERNEXEC
30623+ " movl %0, %%cr0\n"
30624+#endif
30625+
30626 " jmp 2b\n"
30627 ".previous\n"
30628 _ASM_EXTABLE(1b, 3b)
30629- : : "r" (from));
30630+ : "=&r" (cr0) : "r" (from) : "ax");
30631
30632 for ( ; i > 5; i--) {
30633 __asm__ __volatile__ (
30634- "1: prefetch 320(%0)\n"
30635- "2: movq (%0), %%mm0\n"
30636- " movq 8(%0), %%mm1\n"
30637- " movq 16(%0), %%mm2\n"
30638- " movq 24(%0), %%mm3\n"
30639- " movq %%mm0, (%1)\n"
30640- " movq %%mm1, 8(%1)\n"
30641- " movq %%mm2, 16(%1)\n"
30642- " movq %%mm3, 24(%1)\n"
30643- " movq 32(%0), %%mm0\n"
30644- " movq 40(%0), %%mm1\n"
30645- " movq 48(%0), %%mm2\n"
30646- " movq 56(%0), %%mm3\n"
30647- " movq %%mm0, 32(%1)\n"
30648- " movq %%mm1, 40(%1)\n"
30649- " movq %%mm2, 48(%1)\n"
30650- " movq %%mm3, 56(%1)\n"
30651+ "1: prefetch 320(%1)\n"
30652+ "2: movq (%1), %%mm0\n"
30653+ " movq 8(%1), %%mm1\n"
30654+ " movq 16(%1), %%mm2\n"
30655+ " movq 24(%1), %%mm3\n"
30656+ " movq %%mm0, (%2)\n"
30657+ " movq %%mm1, 8(%2)\n"
30658+ " movq %%mm2, 16(%2)\n"
30659+ " movq %%mm3, 24(%2)\n"
30660+ " movq 32(%1), %%mm0\n"
30661+ " movq 40(%1), %%mm1\n"
30662+ " movq 48(%1), %%mm2\n"
30663+ " movq 56(%1), %%mm3\n"
30664+ " movq %%mm0, 32(%2)\n"
30665+ " movq %%mm1, 40(%2)\n"
30666+ " movq %%mm2, 48(%2)\n"
30667+ " movq %%mm3, 56(%2)\n"
30668 ".section .fixup, \"ax\"\n"
30669- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30670+ "3:\n"
30671+
30672+#ifdef CONFIG_PAX_KERNEXEC
30673+ " movl %%cr0, %0\n"
30674+ " movl %0, %%eax\n"
30675+ " andl $0xFFFEFFFF, %%eax\n"
30676+ " movl %%eax, %%cr0\n"
30677+#endif
30678+
30679+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30680+
30681+#ifdef CONFIG_PAX_KERNEXEC
30682+ " movl %0, %%cr0\n"
30683+#endif
30684+
30685 " jmp 2b\n"
30686 ".previous\n"
30687 _ASM_EXTABLE(1b, 3b)
30688- : : "r" (from), "r" (to) : "memory");
30689+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30690
30691 from += 64;
30692 to += 64;
30693@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
30694 static void fast_copy_page(void *to, void *from)
30695 {
30696 int i;
30697+ unsigned long cr0;
30698
30699 kernel_fpu_begin();
30700
30701@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
30702 * but that is for later. -AV
30703 */
30704 __asm__ __volatile__(
30705- "1: prefetch (%0)\n"
30706- " prefetch 64(%0)\n"
30707- " prefetch 128(%0)\n"
30708- " prefetch 192(%0)\n"
30709- " prefetch 256(%0)\n"
30710+ "1: prefetch (%1)\n"
30711+ " prefetch 64(%1)\n"
30712+ " prefetch 128(%1)\n"
30713+ " prefetch 192(%1)\n"
30714+ " prefetch 256(%1)\n"
30715 "2: \n"
30716 ".section .fixup, \"ax\"\n"
30717- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30718+ "3: \n"
30719+
30720+#ifdef CONFIG_PAX_KERNEXEC
30721+ " movl %%cr0, %0\n"
30722+ " movl %0, %%eax\n"
30723+ " andl $0xFFFEFFFF, %%eax\n"
30724+ " movl %%eax, %%cr0\n"
30725+#endif
30726+
30727+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30728+
30729+#ifdef CONFIG_PAX_KERNEXEC
30730+ " movl %0, %%cr0\n"
30731+#endif
30732+
30733 " jmp 2b\n"
30734 ".previous\n"
30735- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30736+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30737
30738 for (i = 0; i < (4096-320)/64; i++) {
30739 __asm__ __volatile__ (
30740- "1: prefetch 320(%0)\n"
30741- "2: movq (%0), %%mm0\n"
30742- " movntq %%mm0, (%1)\n"
30743- " movq 8(%0), %%mm1\n"
30744- " movntq %%mm1, 8(%1)\n"
30745- " movq 16(%0), %%mm2\n"
30746- " movntq %%mm2, 16(%1)\n"
30747- " movq 24(%0), %%mm3\n"
30748- " movntq %%mm3, 24(%1)\n"
30749- " movq 32(%0), %%mm4\n"
30750- " movntq %%mm4, 32(%1)\n"
30751- " movq 40(%0), %%mm5\n"
30752- " movntq %%mm5, 40(%1)\n"
30753- " movq 48(%0), %%mm6\n"
30754- " movntq %%mm6, 48(%1)\n"
30755- " movq 56(%0), %%mm7\n"
30756- " movntq %%mm7, 56(%1)\n"
30757+ "1: prefetch 320(%1)\n"
30758+ "2: movq (%1), %%mm0\n"
30759+ " movntq %%mm0, (%2)\n"
30760+ " movq 8(%1), %%mm1\n"
30761+ " movntq %%mm1, 8(%2)\n"
30762+ " movq 16(%1), %%mm2\n"
30763+ " movntq %%mm2, 16(%2)\n"
30764+ " movq 24(%1), %%mm3\n"
30765+ " movntq %%mm3, 24(%2)\n"
30766+ " movq 32(%1), %%mm4\n"
30767+ " movntq %%mm4, 32(%2)\n"
30768+ " movq 40(%1), %%mm5\n"
30769+ " movntq %%mm5, 40(%2)\n"
30770+ " movq 48(%1), %%mm6\n"
30771+ " movntq %%mm6, 48(%2)\n"
30772+ " movq 56(%1), %%mm7\n"
30773+ " movntq %%mm7, 56(%2)\n"
30774 ".section .fixup, \"ax\"\n"
30775- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30776+ "3:\n"
30777+
30778+#ifdef CONFIG_PAX_KERNEXEC
30779+ " movl %%cr0, %0\n"
30780+ " movl %0, %%eax\n"
30781+ " andl $0xFFFEFFFF, %%eax\n"
30782+ " movl %%eax, %%cr0\n"
30783+#endif
30784+
30785+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30786+
30787+#ifdef CONFIG_PAX_KERNEXEC
30788+ " movl %0, %%cr0\n"
30789+#endif
30790+
30791 " jmp 2b\n"
30792 ".previous\n"
30793- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30794+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30795
30796 from += 64;
30797 to += 64;
30798@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30799 static void fast_copy_page(void *to, void *from)
30800 {
30801 int i;
30802+ unsigned long cr0;
30803
30804 kernel_fpu_begin();
30805
30806 __asm__ __volatile__ (
30807- "1: prefetch (%0)\n"
30808- " prefetch 64(%0)\n"
30809- " prefetch 128(%0)\n"
30810- " prefetch 192(%0)\n"
30811- " prefetch 256(%0)\n"
30812+ "1: prefetch (%1)\n"
30813+ " prefetch 64(%1)\n"
30814+ " prefetch 128(%1)\n"
30815+ " prefetch 192(%1)\n"
30816+ " prefetch 256(%1)\n"
30817 "2: \n"
30818 ".section .fixup, \"ax\"\n"
30819- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30820+ "3: \n"
30821+
30822+#ifdef CONFIG_PAX_KERNEXEC
30823+ " movl %%cr0, %0\n"
30824+ " movl %0, %%eax\n"
30825+ " andl $0xFFFEFFFF, %%eax\n"
30826+ " movl %%eax, %%cr0\n"
30827+#endif
30828+
30829+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30830+
30831+#ifdef CONFIG_PAX_KERNEXEC
30832+ " movl %0, %%cr0\n"
30833+#endif
30834+
30835 " jmp 2b\n"
30836 ".previous\n"
30837- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30838+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30839
30840 for (i = 0; i < 4096/64; i++) {
30841 __asm__ __volatile__ (
30842- "1: prefetch 320(%0)\n"
30843- "2: movq (%0), %%mm0\n"
30844- " movq 8(%0), %%mm1\n"
30845- " movq 16(%0), %%mm2\n"
30846- " movq 24(%0), %%mm3\n"
30847- " movq %%mm0, (%1)\n"
30848- " movq %%mm1, 8(%1)\n"
30849- " movq %%mm2, 16(%1)\n"
30850- " movq %%mm3, 24(%1)\n"
30851- " movq 32(%0), %%mm0\n"
30852- " movq 40(%0), %%mm1\n"
30853- " movq 48(%0), %%mm2\n"
30854- " movq 56(%0), %%mm3\n"
30855- " movq %%mm0, 32(%1)\n"
30856- " movq %%mm1, 40(%1)\n"
30857- " movq %%mm2, 48(%1)\n"
30858- " movq %%mm3, 56(%1)\n"
30859+ "1: prefetch 320(%1)\n"
30860+ "2: movq (%1), %%mm0\n"
30861+ " movq 8(%1), %%mm1\n"
30862+ " movq 16(%1), %%mm2\n"
30863+ " movq 24(%1), %%mm3\n"
30864+ " movq %%mm0, (%2)\n"
30865+ " movq %%mm1, 8(%2)\n"
30866+ " movq %%mm2, 16(%2)\n"
30867+ " movq %%mm3, 24(%2)\n"
30868+ " movq 32(%1), %%mm0\n"
30869+ " movq 40(%1), %%mm1\n"
30870+ " movq 48(%1), %%mm2\n"
30871+ " movq 56(%1), %%mm3\n"
30872+ " movq %%mm0, 32(%2)\n"
30873+ " movq %%mm1, 40(%2)\n"
30874+ " movq %%mm2, 48(%2)\n"
30875+ " movq %%mm3, 56(%2)\n"
30876 ".section .fixup, \"ax\"\n"
30877- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30878+ "3:\n"
30879+
30880+#ifdef CONFIG_PAX_KERNEXEC
30881+ " movl %%cr0, %0\n"
30882+ " movl %0, %%eax\n"
30883+ " andl $0xFFFEFFFF, %%eax\n"
30884+ " movl %%eax, %%cr0\n"
30885+#endif
30886+
30887+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30888+
30889+#ifdef CONFIG_PAX_KERNEXEC
30890+ " movl %0, %%cr0\n"
30891+#endif
30892+
30893 " jmp 2b\n"
30894 ".previous\n"
30895 _ASM_EXTABLE(1b, 3b)
30896- : : "r" (from), "r" (to) : "memory");
30897+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30898
30899 from += 64;
30900 to += 64;
30901diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
30902index f6d13ee..d789440 100644
30903--- a/arch/x86/lib/msr-reg.S
30904+++ b/arch/x86/lib/msr-reg.S
30905@@ -3,6 +3,7 @@
30906 #include <asm/dwarf2.h>
30907 #include <asm/asm.h>
30908 #include <asm/msr.h>
30909+#include <asm/alternative-asm.h>
30910
30911 #ifdef CONFIG_X86_64
30912 /*
30913@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
30914 movl %edi, 28(%r10)
30915 popq_cfi %rbp
30916 popq_cfi %rbx
30917+ pax_force_retaddr
30918 ret
30919 3:
30920 CFI_RESTORE_STATE
30921diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
30922index fc6ba17..14ad9a5 100644
30923--- a/arch/x86/lib/putuser.S
30924+++ b/arch/x86/lib/putuser.S
30925@@ -16,7 +16,9 @@
30926 #include <asm/errno.h>
30927 #include <asm/asm.h>
30928 #include <asm/smap.h>
30929-
30930+#include <asm/segment.h>
30931+#include <asm/pgtable.h>
30932+#include <asm/alternative-asm.h>
30933
30934 /*
30935 * __put_user_X
30936@@ -30,57 +32,125 @@
30937 * as they get called from within inline assembly.
30938 */
30939
30940-#define ENTER CFI_STARTPROC ; \
30941- GET_THREAD_INFO(%_ASM_BX)
30942-#define EXIT ASM_CLAC ; \
30943- ret ; \
30944+#define ENTER CFI_STARTPROC
30945+#define EXIT ASM_CLAC ; \
30946+ pax_force_retaddr ; \
30947+ ret ; \
30948 CFI_ENDPROC
30949
30950+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30951+#define _DEST %_ASM_CX,%_ASM_BX
30952+#else
30953+#define _DEST %_ASM_CX
30954+#endif
30955+
30956+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30957+#define __copyuser_seg gs;
30958+#else
30959+#define __copyuser_seg
30960+#endif
30961+
30962 .text
30963 ENTRY(__put_user_1)
30964 ENTER
30965+
30966+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30967+ GET_THREAD_INFO(%_ASM_BX)
30968 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
30969 jae bad_put_user
30970+
30971+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30972+ mov pax_user_shadow_base,%_ASM_BX
30973+ cmp %_ASM_BX,%_ASM_CX
30974+ jb 1234f
30975+ xor %ebx,%ebx
30976+1234:
30977+#endif
30978+
30979+#endif
30980+
30981 ASM_STAC
30982-1: movb %al,(%_ASM_CX)
30983+1: __copyuser_seg movb %al,(_DEST)
30984 xor %eax,%eax
30985 EXIT
30986 ENDPROC(__put_user_1)
30987
30988 ENTRY(__put_user_2)
30989 ENTER
30990+
30991+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30992+ GET_THREAD_INFO(%_ASM_BX)
30993 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30994 sub $1,%_ASM_BX
30995 cmp %_ASM_BX,%_ASM_CX
30996 jae bad_put_user
30997+
30998+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30999+ mov pax_user_shadow_base,%_ASM_BX
31000+ cmp %_ASM_BX,%_ASM_CX
31001+ jb 1234f
31002+ xor %ebx,%ebx
31003+1234:
31004+#endif
31005+
31006+#endif
31007+
31008 ASM_STAC
31009-2: movw %ax,(%_ASM_CX)
31010+2: __copyuser_seg movw %ax,(_DEST)
31011 xor %eax,%eax
31012 EXIT
31013 ENDPROC(__put_user_2)
31014
31015 ENTRY(__put_user_4)
31016 ENTER
31017+
31018+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31019+ GET_THREAD_INFO(%_ASM_BX)
31020 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31021 sub $3,%_ASM_BX
31022 cmp %_ASM_BX,%_ASM_CX
31023 jae bad_put_user
31024+
31025+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31026+ mov pax_user_shadow_base,%_ASM_BX
31027+ cmp %_ASM_BX,%_ASM_CX
31028+ jb 1234f
31029+ xor %ebx,%ebx
31030+1234:
31031+#endif
31032+
31033+#endif
31034+
31035 ASM_STAC
31036-3: movl %eax,(%_ASM_CX)
31037+3: __copyuser_seg movl %eax,(_DEST)
31038 xor %eax,%eax
31039 EXIT
31040 ENDPROC(__put_user_4)
31041
31042 ENTRY(__put_user_8)
31043 ENTER
31044+
31045+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31046+ GET_THREAD_INFO(%_ASM_BX)
31047 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31048 sub $7,%_ASM_BX
31049 cmp %_ASM_BX,%_ASM_CX
31050 jae bad_put_user
31051+
31052+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31053+ mov pax_user_shadow_base,%_ASM_BX
31054+ cmp %_ASM_BX,%_ASM_CX
31055+ jb 1234f
31056+ xor %ebx,%ebx
31057+1234:
31058+#endif
31059+
31060+#endif
31061+
31062 ASM_STAC
31063-4: mov %_ASM_AX,(%_ASM_CX)
31064+4: __copyuser_seg mov %_ASM_AX,(_DEST)
31065 #ifdef CONFIG_X86_32
31066-5: movl %edx,4(%_ASM_CX)
31067+5: __copyuser_seg movl %edx,4(_DEST)
31068 #endif
31069 xor %eax,%eax
31070 EXIT
31071diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
31072index 5dff5f0..cadebf4 100644
31073--- a/arch/x86/lib/rwsem.S
31074+++ b/arch/x86/lib/rwsem.S
31075@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
31076 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31077 CFI_RESTORE __ASM_REG(dx)
31078 restore_common_regs
31079+ pax_force_retaddr
31080 ret
31081 CFI_ENDPROC
31082 ENDPROC(call_rwsem_down_read_failed)
31083@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
31084 movq %rax,%rdi
31085 call rwsem_down_write_failed
31086 restore_common_regs
31087+ pax_force_retaddr
31088 ret
31089 CFI_ENDPROC
31090 ENDPROC(call_rwsem_down_write_failed)
31091@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
31092 movq %rax,%rdi
31093 call rwsem_wake
31094 restore_common_regs
31095-1: ret
31096+1: pax_force_retaddr
31097+ ret
31098 CFI_ENDPROC
31099 ENDPROC(call_rwsem_wake)
31100
31101@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
31102 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31103 CFI_RESTORE __ASM_REG(dx)
31104 restore_common_regs
31105+ pax_force_retaddr
31106 ret
31107 CFI_ENDPROC
31108 ENDPROC(call_rwsem_downgrade_wake)
31109diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
31110index b30b5eb..2b57052 100644
31111--- a/arch/x86/lib/thunk_64.S
31112+++ b/arch/x86/lib/thunk_64.S
31113@@ -9,6 +9,7 @@
31114 #include <asm/dwarf2.h>
31115 #include <asm/calling.h>
31116 #include <asm/asm.h>
31117+#include <asm/alternative-asm.h>
31118
31119 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
31120 .macro THUNK name, func, put_ret_addr_in_rdi=0
31121@@ -16,11 +17,11 @@
31122 \name:
31123 CFI_STARTPROC
31124
31125- /* this one pushes 9 elems, the next one would be %rIP */
31126- SAVE_ARGS
31127+ /* this one pushes 15+1 elems, the next one would be %rIP */
31128+ SAVE_ARGS 8
31129
31130 .if \put_ret_addr_in_rdi
31131- movq_cfi_restore 9*8, rdi
31132+ movq_cfi_restore RIP, rdi
31133 .endif
31134
31135 call \func
31136@@ -47,9 +48,10 @@
31137
31138 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
31139 CFI_STARTPROC
31140- SAVE_ARGS
31141+ SAVE_ARGS 8
31142 restore:
31143- RESTORE_ARGS
31144+ RESTORE_ARGS 1,8
31145+ pax_force_retaddr
31146 ret
31147 CFI_ENDPROC
31148 _ASM_NOKPROBE(restore)
31149diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
31150index ddf9ecb..e342586 100644
31151--- a/arch/x86/lib/usercopy.c
31152+++ b/arch/x86/lib/usercopy.c
31153@@ -20,7 +20,7 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
31154 unsigned long ret;
31155
31156 if (__range_not_ok(from, n, TASK_SIZE))
31157- return 0;
31158+ return n;
31159
31160 /*
31161 * Even though this function is typically called from NMI/IRQ context
31162diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
31163index e2f5e21..4b22130 100644
31164--- a/arch/x86/lib/usercopy_32.c
31165+++ b/arch/x86/lib/usercopy_32.c
31166@@ -42,11 +42,13 @@ do { \
31167 int __d0; \
31168 might_fault(); \
31169 __asm__ __volatile__( \
31170+ __COPYUSER_SET_ES \
31171 ASM_STAC "\n" \
31172 "0: rep; stosl\n" \
31173 " movl %2,%0\n" \
31174 "1: rep; stosb\n" \
31175 "2: " ASM_CLAC "\n" \
31176+ __COPYUSER_RESTORE_ES \
31177 ".section .fixup,\"ax\"\n" \
31178 "3: lea 0(%2,%0,4),%0\n" \
31179 " jmp 2b\n" \
31180@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
31181
31182 #ifdef CONFIG_X86_INTEL_USERCOPY
31183 static unsigned long
31184-__copy_user_intel(void __user *to, const void *from, unsigned long size)
31185+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
31186 {
31187 int d0, d1;
31188 __asm__ __volatile__(
31189@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31190 " .align 2,0x90\n"
31191 "3: movl 0(%4), %%eax\n"
31192 "4: movl 4(%4), %%edx\n"
31193- "5: movl %%eax, 0(%3)\n"
31194- "6: movl %%edx, 4(%3)\n"
31195+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
31196+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
31197 "7: movl 8(%4), %%eax\n"
31198 "8: movl 12(%4),%%edx\n"
31199- "9: movl %%eax, 8(%3)\n"
31200- "10: movl %%edx, 12(%3)\n"
31201+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
31202+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
31203 "11: movl 16(%4), %%eax\n"
31204 "12: movl 20(%4), %%edx\n"
31205- "13: movl %%eax, 16(%3)\n"
31206- "14: movl %%edx, 20(%3)\n"
31207+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
31208+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31209 "15: movl 24(%4), %%eax\n"
31210 "16: movl 28(%4), %%edx\n"
31211- "17: movl %%eax, 24(%3)\n"
31212- "18: movl %%edx, 28(%3)\n"
31213+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31214+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31215 "19: movl 32(%4), %%eax\n"
31216 "20: movl 36(%4), %%edx\n"
31217- "21: movl %%eax, 32(%3)\n"
31218- "22: movl %%edx, 36(%3)\n"
31219+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31220+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31221 "23: movl 40(%4), %%eax\n"
31222 "24: movl 44(%4), %%edx\n"
31223- "25: movl %%eax, 40(%3)\n"
31224- "26: movl %%edx, 44(%3)\n"
31225+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31226+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31227 "27: movl 48(%4), %%eax\n"
31228 "28: movl 52(%4), %%edx\n"
31229- "29: movl %%eax, 48(%3)\n"
31230- "30: movl %%edx, 52(%3)\n"
31231+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31232+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31233 "31: movl 56(%4), %%eax\n"
31234 "32: movl 60(%4), %%edx\n"
31235- "33: movl %%eax, 56(%3)\n"
31236- "34: movl %%edx, 60(%3)\n"
31237+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31238+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31239 " addl $-64, %0\n"
31240 " addl $64, %4\n"
31241 " addl $64, %3\n"
31242@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31243 " shrl $2, %0\n"
31244 " andl $3, %%eax\n"
31245 " cld\n"
31246+ __COPYUSER_SET_ES
31247 "99: rep; movsl\n"
31248 "36: movl %%eax, %0\n"
31249 "37: rep; movsb\n"
31250 "100:\n"
31251+ __COPYUSER_RESTORE_ES
31252+ ".section .fixup,\"ax\"\n"
31253+ "101: lea 0(%%eax,%0,4),%0\n"
31254+ " jmp 100b\n"
31255+ ".previous\n"
31256+ _ASM_EXTABLE(1b,100b)
31257+ _ASM_EXTABLE(2b,100b)
31258+ _ASM_EXTABLE(3b,100b)
31259+ _ASM_EXTABLE(4b,100b)
31260+ _ASM_EXTABLE(5b,100b)
31261+ _ASM_EXTABLE(6b,100b)
31262+ _ASM_EXTABLE(7b,100b)
31263+ _ASM_EXTABLE(8b,100b)
31264+ _ASM_EXTABLE(9b,100b)
31265+ _ASM_EXTABLE(10b,100b)
31266+ _ASM_EXTABLE(11b,100b)
31267+ _ASM_EXTABLE(12b,100b)
31268+ _ASM_EXTABLE(13b,100b)
31269+ _ASM_EXTABLE(14b,100b)
31270+ _ASM_EXTABLE(15b,100b)
31271+ _ASM_EXTABLE(16b,100b)
31272+ _ASM_EXTABLE(17b,100b)
31273+ _ASM_EXTABLE(18b,100b)
31274+ _ASM_EXTABLE(19b,100b)
31275+ _ASM_EXTABLE(20b,100b)
31276+ _ASM_EXTABLE(21b,100b)
31277+ _ASM_EXTABLE(22b,100b)
31278+ _ASM_EXTABLE(23b,100b)
31279+ _ASM_EXTABLE(24b,100b)
31280+ _ASM_EXTABLE(25b,100b)
31281+ _ASM_EXTABLE(26b,100b)
31282+ _ASM_EXTABLE(27b,100b)
31283+ _ASM_EXTABLE(28b,100b)
31284+ _ASM_EXTABLE(29b,100b)
31285+ _ASM_EXTABLE(30b,100b)
31286+ _ASM_EXTABLE(31b,100b)
31287+ _ASM_EXTABLE(32b,100b)
31288+ _ASM_EXTABLE(33b,100b)
31289+ _ASM_EXTABLE(34b,100b)
31290+ _ASM_EXTABLE(35b,100b)
31291+ _ASM_EXTABLE(36b,100b)
31292+ _ASM_EXTABLE(37b,100b)
31293+ _ASM_EXTABLE(99b,101b)
31294+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
31295+ : "1"(to), "2"(from), "0"(size)
31296+ : "eax", "edx", "memory");
31297+ return size;
31298+}
31299+
31300+static unsigned long
31301+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31302+{
31303+ int d0, d1;
31304+ __asm__ __volatile__(
31305+ " .align 2,0x90\n"
31306+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31307+ " cmpl $67, %0\n"
31308+ " jbe 3f\n"
31309+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31310+ " .align 2,0x90\n"
31311+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31312+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31313+ "5: movl %%eax, 0(%3)\n"
31314+ "6: movl %%edx, 4(%3)\n"
31315+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31316+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31317+ "9: movl %%eax, 8(%3)\n"
31318+ "10: movl %%edx, 12(%3)\n"
31319+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31320+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31321+ "13: movl %%eax, 16(%3)\n"
31322+ "14: movl %%edx, 20(%3)\n"
31323+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31324+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31325+ "17: movl %%eax, 24(%3)\n"
31326+ "18: movl %%edx, 28(%3)\n"
31327+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31328+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31329+ "21: movl %%eax, 32(%3)\n"
31330+ "22: movl %%edx, 36(%3)\n"
31331+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31332+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31333+ "25: movl %%eax, 40(%3)\n"
31334+ "26: movl %%edx, 44(%3)\n"
31335+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31336+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31337+ "29: movl %%eax, 48(%3)\n"
31338+ "30: movl %%edx, 52(%3)\n"
31339+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31340+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31341+ "33: movl %%eax, 56(%3)\n"
31342+ "34: movl %%edx, 60(%3)\n"
31343+ " addl $-64, %0\n"
31344+ " addl $64, %4\n"
31345+ " addl $64, %3\n"
31346+ " cmpl $63, %0\n"
31347+ " ja 1b\n"
31348+ "35: movl %0, %%eax\n"
31349+ " shrl $2, %0\n"
31350+ " andl $3, %%eax\n"
31351+ " cld\n"
31352+ "99: rep; "__copyuser_seg" movsl\n"
31353+ "36: movl %%eax, %0\n"
31354+ "37: rep; "__copyuser_seg" movsb\n"
31355+ "100:\n"
31356 ".section .fixup,\"ax\"\n"
31357 "101: lea 0(%%eax,%0,4),%0\n"
31358 " jmp 100b\n"
31359@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31360 int d0, d1;
31361 __asm__ __volatile__(
31362 " .align 2,0x90\n"
31363- "0: movl 32(%4), %%eax\n"
31364+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31365 " cmpl $67, %0\n"
31366 " jbe 2f\n"
31367- "1: movl 64(%4), %%eax\n"
31368+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31369 " .align 2,0x90\n"
31370- "2: movl 0(%4), %%eax\n"
31371- "21: movl 4(%4), %%edx\n"
31372+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31373+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31374 " movl %%eax, 0(%3)\n"
31375 " movl %%edx, 4(%3)\n"
31376- "3: movl 8(%4), %%eax\n"
31377- "31: movl 12(%4),%%edx\n"
31378+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31379+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31380 " movl %%eax, 8(%3)\n"
31381 " movl %%edx, 12(%3)\n"
31382- "4: movl 16(%4), %%eax\n"
31383- "41: movl 20(%4), %%edx\n"
31384+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31385+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31386 " movl %%eax, 16(%3)\n"
31387 " movl %%edx, 20(%3)\n"
31388- "10: movl 24(%4), %%eax\n"
31389- "51: movl 28(%4), %%edx\n"
31390+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31391+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31392 " movl %%eax, 24(%3)\n"
31393 " movl %%edx, 28(%3)\n"
31394- "11: movl 32(%4), %%eax\n"
31395- "61: movl 36(%4), %%edx\n"
31396+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31397+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31398 " movl %%eax, 32(%3)\n"
31399 " movl %%edx, 36(%3)\n"
31400- "12: movl 40(%4), %%eax\n"
31401- "71: movl 44(%4), %%edx\n"
31402+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31403+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31404 " movl %%eax, 40(%3)\n"
31405 " movl %%edx, 44(%3)\n"
31406- "13: movl 48(%4), %%eax\n"
31407- "81: movl 52(%4), %%edx\n"
31408+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31409+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31410 " movl %%eax, 48(%3)\n"
31411 " movl %%edx, 52(%3)\n"
31412- "14: movl 56(%4), %%eax\n"
31413- "91: movl 60(%4), %%edx\n"
31414+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31415+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31416 " movl %%eax, 56(%3)\n"
31417 " movl %%edx, 60(%3)\n"
31418 " addl $-64, %0\n"
31419@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31420 " shrl $2, %0\n"
31421 " andl $3, %%eax\n"
31422 " cld\n"
31423- "6: rep; movsl\n"
31424+ "6: rep; "__copyuser_seg" movsl\n"
31425 " movl %%eax,%0\n"
31426- "7: rep; movsb\n"
31427+ "7: rep; "__copyuser_seg" movsb\n"
31428 "8:\n"
31429 ".section .fixup,\"ax\"\n"
31430 "9: lea 0(%%eax,%0,4),%0\n"
31431@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31432
31433 __asm__ __volatile__(
31434 " .align 2,0x90\n"
31435- "0: movl 32(%4), %%eax\n"
31436+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31437 " cmpl $67, %0\n"
31438 " jbe 2f\n"
31439- "1: movl 64(%4), %%eax\n"
31440+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31441 " .align 2,0x90\n"
31442- "2: movl 0(%4), %%eax\n"
31443- "21: movl 4(%4), %%edx\n"
31444+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31445+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31446 " movnti %%eax, 0(%3)\n"
31447 " movnti %%edx, 4(%3)\n"
31448- "3: movl 8(%4), %%eax\n"
31449- "31: movl 12(%4),%%edx\n"
31450+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31451+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31452 " movnti %%eax, 8(%3)\n"
31453 " movnti %%edx, 12(%3)\n"
31454- "4: movl 16(%4), %%eax\n"
31455- "41: movl 20(%4), %%edx\n"
31456+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31457+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31458 " movnti %%eax, 16(%3)\n"
31459 " movnti %%edx, 20(%3)\n"
31460- "10: movl 24(%4), %%eax\n"
31461- "51: movl 28(%4), %%edx\n"
31462+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31463+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31464 " movnti %%eax, 24(%3)\n"
31465 " movnti %%edx, 28(%3)\n"
31466- "11: movl 32(%4), %%eax\n"
31467- "61: movl 36(%4), %%edx\n"
31468+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31469+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31470 " movnti %%eax, 32(%3)\n"
31471 " movnti %%edx, 36(%3)\n"
31472- "12: movl 40(%4), %%eax\n"
31473- "71: movl 44(%4), %%edx\n"
31474+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31475+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31476 " movnti %%eax, 40(%3)\n"
31477 " movnti %%edx, 44(%3)\n"
31478- "13: movl 48(%4), %%eax\n"
31479- "81: movl 52(%4), %%edx\n"
31480+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31481+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31482 " movnti %%eax, 48(%3)\n"
31483 " movnti %%edx, 52(%3)\n"
31484- "14: movl 56(%4), %%eax\n"
31485- "91: movl 60(%4), %%edx\n"
31486+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31487+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31488 " movnti %%eax, 56(%3)\n"
31489 " movnti %%edx, 60(%3)\n"
31490 " addl $-64, %0\n"
31491@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31492 " shrl $2, %0\n"
31493 " andl $3, %%eax\n"
31494 " cld\n"
31495- "6: rep; movsl\n"
31496+ "6: rep; "__copyuser_seg" movsl\n"
31497 " movl %%eax,%0\n"
31498- "7: rep; movsb\n"
31499+ "7: rep; "__copyuser_seg" movsb\n"
31500 "8:\n"
31501 ".section .fixup,\"ax\"\n"
31502 "9: lea 0(%%eax,%0,4),%0\n"
31503@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
31504
31505 __asm__ __volatile__(
31506 " .align 2,0x90\n"
31507- "0: movl 32(%4), %%eax\n"
31508+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31509 " cmpl $67, %0\n"
31510 " jbe 2f\n"
31511- "1: movl 64(%4), %%eax\n"
31512+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31513 " .align 2,0x90\n"
31514- "2: movl 0(%4), %%eax\n"
31515- "21: movl 4(%4), %%edx\n"
31516+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31517+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31518 " movnti %%eax, 0(%3)\n"
31519 " movnti %%edx, 4(%3)\n"
31520- "3: movl 8(%4), %%eax\n"
31521- "31: movl 12(%4),%%edx\n"
31522+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31523+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31524 " movnti %%eax, 8(%3)\n"
31525 " movnti %%edx, 12(%3)\n"
31526- "4: movl 16(%4), %%eax\n"
31527- "41: movl 20(%4), %%edx\n"
31528+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31529+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31530 " movnti %%eax, 16(%3)\n"
31531 " movnti %%edx, 20(%3)\n"
31532- "10: movl 24(%4), %%eax\n"
31533- "51: movl 28(%4), %%edx\n"
31534+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31535+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31536 " movnti %%eax, 24(%3)\n"
31537 " movnti %%edx, 28(%3)\n"
31538- "11: movl 32(%4), %%eax\n"
31539- "61: movl 36(%4), %%edx\n"
31540+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31541+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31542 " movnti %%eax, 32(%3)\n"
31543 " movnti %%edx, 36(%3)\n"
31544- "12: movl 40(%4), %%eax\n"
31545- "71: movl 44(%4), %%edx\n"
31546+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31547+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31548 " movnti %%eax, 40(%3)\n"
31549 " movnti %%edx, 44(%3)\n"
31550- "13: movl 48(%4), %%eax\n"
31551- "81: movl 52(%4), %%edx\n"
31552+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31553+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31554 " movnti %%eax, 48(%3)\n"
31555 " movnti %%edx, 52(%3)\n"
31556- "14: movl 56(%4), %%eax\n"
31557- "91: movl 60(%4), %%edx\n"
31558+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31559+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31560 " movnti %%eax, 56(%3)\n"
31561 " movnti %%edx, 60(%3)\n"
31562 " addl $-64, %0\n"
31563@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
31564 " shrl $2, %0\n"
31565 " andl $3, %%eax\n"
31566 " cld\n"
31567- "6: rep; movsl\n"
31568+ "6: rep; "__copyuser_seg" movsl\n"
31569 " movl %%eax,%0\n"
31570- "7: rep; movsb\n"
31571+ "7: rep; "__copyuser_seg" movsb\n"
31572 "8:\n"
31573 ".section .fixup,\"ax\"\n"
31574 "9: lea 0(%%eax,%0,4),%0\n"
31575@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
31576 */
31577 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
31578 unsigned long size);
31579-unsigned long __copy_user_intel(void __user *to, const void *from,
31580+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
31581+ unsigned long size);
31582+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
31583 unsigned long size);
31584 unsigned long __copy_user_zeroing_intel_nocache(void *to,
31585 const void __user *from, unsigned long size);
31586 #endif /* CONFIG_X86_INTEL_USERCOPY */
31587
31588 /* Generic arbitrary sized copy. */
31589-#define __copy_user(to, from, size) \
31590+#define __copy_user(to, from, size, prefix, set, restore) \
31591 do { \
31592 int __d0, __d1, __d2; \
31593 __asm__ __volatile__( \
31594+ set \
31595 " cmp $7,%0\n" \
31596 " jbe 1f\n" \
31597 " movl %1,%0\n" \
31598 " negl %0\n" \
31599 " andl $7,%0\n" \
31600 " subl %0,%3\n" \
31601- "4: rep; movsb\n" \
31602+ "4: rep; "prefix"movsb\n" \
31603 " movl %3,%0\n" \
31604 " shrl $2,%0\n" \
31605 " andl $3,%3\n" \
31606 " .align 2,0x90\n" \
31607- "0: rep; movsl\n" \
31608+ "0: rep; "prefix"movsl\n" \
31609 " movl %3,%0\n" \
31610- "1: rep; movsb\n" \
31611+ "1: rep; "prefix"movsb\n" \
31612 "2:\n" \
31613+ restore \
31614 ".section .fixup,\"ax\"\n" \
31615 "5: addl %3,%0\n" \
31616 " jmp 2b\n" \
31617@@ -538,14 +650,14 @@ do { \
31618 " negl %0\n" \
31619 " andl $7,%0\n" \
31620 " subl %0,%3\n" \
31621- "4: rep; movsb\n" \
31622+ "4: rep; "__copyuser_seg"movsb\n" \
31623 " movl %3,%0\n" \
31624 " shrl $2,%0\n" \
31625 " andl $3,%3\n" \
31626 " .align 2,0x90\n" \
31627- "0: rep; movsl\n" \
31628+ "0: rep; "__copyuser_seg"movsl\n" \
31629 " movl %3,%0\n" \
31630- "1: rep; movsb\n" \
31631+ "1: rep; "__copyuser_seg"movsb\n" \
31632 "2:\n" \
31633 ".section .fixup,\"ax\"\n" \
31634 "5: addl %3,%0\n" \
31635@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
31636 {
31637 stac();
31638 if (movsl_is_ok(to, from, n))
31639- __copy_user(to, from, n);
31640+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
31641 else
31642- n = __copy_user_intel(to, from, n);
31643+ n = __generic_copy_to_user_intel(to, from, n);
31644 clac();
31645 return n;
31646 }
31647@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
31648 {
31649 stac();
31650 if (movsl_is_ok(to, from, n))
31651- __copy_user(to, from, n);
31652+ __copy_user(to, from, n, __copyuser_seg, "", "");
31653 else
31654- n = __copy_user_intel((void __user *)to,
31655- (const void *)from, n);
31656+ n = __generic_copy_from_user_intel(to, from, n);
31657 clac();
31658 return n;
31659 }
31660@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
31661 if (n > 64 && cpu_has_xmm2)
31662 n = __copy_user_intel_nocache(to, from, n);
31663 else
31664- __copy_user(to, from, n);
31665+ __copy_user(to, from, n, __copyuser_seg, "", "");
31666 #else
31667- __copy_user(to, from, n);
31668+ __copy_user(to, from, n, __copyuser_seg, "", "");
31669 #endif
31670 clac();
31671 return n;
31672 }
31673 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
31674
31675-/**
31676- * copy_to_user: - Copy a block of data into user space.
31677- * @to: Destination address, in user space.
31678- * @from: Source address, in kernel space.
31679- * @n: Number of bytes to copy.
31680- *
31681- * Context: User context only. This function may sleep.
31682- *
31683- * Copy data from kernel space to user space.
31684- *
31685- * Returns number of bytes that could not be copied.
31686- * On success, this will be zero.
31687- */
31688-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31689+#ifdef CONFIG_PAX_MEMORY_UDEREF
31690+void __set_fs(mm_segment_t x)
31691 {
31692- if (access_ok(VERIFY_WRITE, to, n))
31693- n = __copy_to_user(to, from, n);
31694- return n;
31695+ switch (x.seg) {
31696+ case 0:
31697+ loadsegment(gs, 0);
31698+ break;
31699+ case TASK_SIZE_MAX:
31700+ loadsegment(gs, __USER_DS);
31701+ break;
31702+ case -1UL:
31703+ loadsegment(gs, __KERNEL_DS);
31704+ break;
31705+ default:
31706+ BUG();
31707+ }
31708 }
31709-EXPORT_SYMBOL(_copy_to_user);
31710+EXPORT_SYMBOL(__set_fs);
31711
31712-/**
31713- * copy_from_user: - Copy a block of data from user space.
31714- * @to: Destination address, in kernel space.
31715- * @from: Source address, in user space.
31716- * @n: Number of bytes to copy.
31717- *
31718- * Context: User context only. This function may sleep.
31719- *
31720- * Copy data from user space to kernel space.
31721- *
31722- * Returns number of bytes that could not be copied.
31723- * On success, this will be zero.
31724- *
31725- * If some data could not be copied, this function will pad the copied
31726- * data to the requested size using zero bytes.
31727- */
31728-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31729+void set_fs(mm_segment_t x)
31730 {
31731- if (access_ok(VERIFY_READ, from, n))
31732- n = __copy_from_user(to, from, n);
31733- else
31734- memset(to, 0, n);
31735- return n;
31736+ current_thread_info()->addr_limit = x;
31737+ __set_fs(x);
31738 }
31739-EXPORT_SYMBOL(_copy_from_user);
31740+EXPORT_SYMBOL(set_fs);
31741+#endif
31742diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31743index 0a42327..7a82465 100644
31744--- a/arch/x86/lib/usercopy_64.c
31745+++ b/arch/x86/lib/usercopy_64.c
31746@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31747 might_fault();
31748 /* no memory constraint because it doesn't change any memory gcc knows
31749 about */
31750+ pax_open_userland();
31751 stac();
31752 asm volatile(
31753 " testq %[size8],%[size8]\n"
31754@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31755 _ASM_EXTABLE(0b,3b)
31756 _ASM_EXTABLE(1b,2b)
31757 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31758- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31759+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31760 [zero] "r" (0UL), [eight] "r" (8UL));
31761 clac();
31762+ pax_close_userland();
31763 return size;
31764 }
31765 EXPORT_SYMBOL(__clear_user);
31766@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31767 }
31768 EXPORT_SYMBOL(clear_user);
31769
31770-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31771+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31772 {
31773- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31774- return copy_user_generic((__force void *)to, (__force void *)from, len);
31775- }
31776- return len;
31777+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31778+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31779+ return len;
31780 }
31781 EXPORT_SYMBOL(copy_in_user);
31782
31783@@ -69,8 +70,10 @@ EXPORT_SYMBOL(copy_in_user);
31784 * it is not necessary to optimize tail handling.
31785 */
31786 __visible unsigned long
31787-copy_user_handle_tail(char *to, char *from, unsigned len)
31788+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len)
31789 {
31790+ clac();
31791+ pax_close_userland();
31792 for (; len; --len, to++) {
31793 char c;
31794
31795@@ -79,10 +82,9 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
31796 if (__put_user_nocheck(c, to, sizeof(char)))
31797 break;
31798 }
31799- clac();
31800
31801 /* If the destination is a kernel buffer, we always clear the end */
31802- if (!__addr_ok(to))
31803+ if (!__addr_ok(to) && (unsigned long)to >= TASK_SIZE_MAX + pax_user_shadow_base)
31804 memset(to, 0, len);
31805 return len;
31806 }
31807diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31808index c4cc740..60a7362 100644
31809--- a/arch/x86/mm/Makefile
31810+++ b/arch/x86/mm/Makefile
31811@@ -35,3 +35,7 @@ obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31812 obj-$(CONFIG_MEMTEST) += memtest.o
31813
31814 obj-$(CONFIG_X86_INTEL_MPX) += mpx.o
31815+
31816+quote:="
31817+obj-$(CONFIG_X86_64) += uderef_64.o
31818+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
31819diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31820index 903ec1e..c4166b2 100644
31821--- a/arch/x86/mm/extable.c
31822+++ b/arch/x86/mm/extable.c
31823@@ -6,12 +6,24 @@
31824 static inline unsigned long
31825 ex_insn_addr(const struct exception_table_entry *x)
31826 {
31827- return (unsigned long)&x->insn + x->insn;
31828+ unsigned long reloc = 0;
31829+
31830+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31831+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31832+#endif
31833+
31834+ return (unsigned long)&x->insn + x->insn + reloc;
31835 }
31836 static inline unsigned long
31837 ex_fixup_addr(const struct exception_table_entry *x)
31838 {
31839- return (unsigned long)&x->fixup + x->fixup;
31840+ unsigned long reloc = 0;
31841+
31842+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31843+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31844+#endif
31845+
31846+ return (unsigned long)&x->fixup + x->fixup + reloc;
31847 }
31848
31849 int fixup_exception(struct pt_regs *regs)
31850@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
31851 unsigned long new_ip;
31852
31853 #ifdef CONFIG_PNPBIOS
31854- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
31855+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
31856 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
31857 extern u32 pnp_bios_is_utter_crap;
31858 pnp_bios_is_utter_crap = 1;
31859@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
31860 i += 4;
31861 p->fixup -= i;
31862 i += 4;
31863+
31864+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31865+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
31866+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31867+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31868+#endif
31869+
31870 }
31871 }
31872
31873diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
31874index ede025f..ecc2d96 100644
31875--- a/arch/x86/mm/fault.c
31876+++ b/arch/x86/mm/fault.c
31877@@ -13,12 +13,19 @@
31878 #include <linux/hugetlb.h> /* hstate_index_to_shift */
31879 #include <linux/prefetch.h> /* prefetchw */
31880 #include <linux/context_tracking.h> /* exception_enter(), ... */
31881+#include <linux/unistd.h>
31882+#include <linux/compiler.h>
31883
31884 #include <asm/traps.h> /* dotraplinkage, ... */
31885 #include <asm/pgalloc.h> /* pgd_*(), ... */
31886 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
31887 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
31888 #include <asm/vsyscall.h> /* emulate_vsyscall */
31889+#include <asm/tlbflush.h>
31890+
31891+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31892+#include <asm/stacktrace.h>
31893+#endif
31894
31895 #define CREATE_TRACE_POINTS
31896 #include <asm/trace/exceptions.h>
31897@@ -59,7 +66,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
31898 int ret = 0;
31899
31900 /* kprobe_running() needs smp_processor_id() */
31901- if (kprobes_built_in() && !user_mode_vm(regs)) {
31902+ if (kprobes_built_in() && !user_mode(regs)) {
31903 preempt_disable();
31904 if (kprobe_running() && kprobe_fault_handler(regs, 14))
31905 ret = 1;
31906@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
31907 return !instr_lo || (instr_lo>>1) == 1;
31908 case 0x00:
31909 /* Prefetch instruction is 0x0F0D or 0x0F18 */
31910- if (probe_kernel_address(instr, opcode))
31911+ if (user_mode(regs)) {
31912+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31913+ return 0;
31914+ } else if (probe_kernel_address(instr, opcode))
31915 return 0;
31916
31917 *prefetch = (instr_lo == 0xF) &&
31918@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
31919 while (instr < max_instr) {
31920 unsigned char opcode;
31921
31922- if (probe_kernel_address(instr, opcode))
31923+ if (user_mode(regs)) {
31924+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31925+ break;
31926+ } else if (probe_kernel_address(instr, opcode))
31927 break;
31928
31929 instr++;
31930@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
31931 force_sig_info(si_signo, &info, tsk);
31932 }
31933
31934+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31935+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
31936+#endif
31937+
31938+#ifdef CONFIG_PAX_EMUTRAMP
31939+static int pax_handle_fetch_fault(struct pt_regs *regs);
31940+#endif
31941+
31942+#ifdef CONFIG_PAX_PAGEEXEC
31943+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
31944+{
31945+ pgd_t *pgd;
31946+ pud_t *pud;
31947+ pmd_t *pmd;
31948+
31949+ pgd = pgd_offset(mm, address);
31950+ if (!pgd_present(*pgd))
31951+ return NULL;
31952+ pud = pud_offset(pgd, address);
31953+ if (!pud_present(*pud))
31954+ return NULL;
31955+ pmd = pmd_offset(pud, address);
31956+ if (!pmd_present(*pmd))
31957+ return NULL;
31958+ return pmd;
31959+}
31960+#endif
31961+
31962 DEFINE_SPINLOCK(pgd_lock);
31963 LIST_HEAD(pgd_list);
31964
31965@@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
31966 for (address = VMALLOC_START & PMD_MASK;
31967 address >= TASK_SIZE && address < FIXADDR_TOP;
31968 address += PMD_SIZE) {
31969+
31970+#ifdef CONFIG_PAX_PER_CPU_PGD
31971+ unsigned long cpu;
31972+#else
31973 struct page *page;
31974+#endif
31975
31976 spin_lock(&pgd_lock);
31977+
31978+#ifdef CONFIG_PAX_PER_CPU_PGD
31979+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31980+ pgd_t *pgd = get_cpu_pgd(cpu, user);
31981+ pmd_t *ret;
31982+
31983+ ret = vmalloc_sync_one(pgd, address);
31984+ if (!ret)
31985+ break;
31986+ pgd = get_cpu_pgd(cpu, kernel);
31987+#else
31988 list_for_each_entry(page, &pgd_list, lru) {
31989+ pgd_t *pgd;
31990 spinlock_t *pgt_lock;
31991 pmd_t *ret;
31992
31993@@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
31994 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31995
31996 spin_lock(pgt_lock);
31997- ret = vmalloc_sync_one(page_address(page), address);
31998+ pgd = page_address(page);
31999+#endif
32000+
32001+ ret = vmalloc_sync_one(pgd, address);
32002+
32003+#ifndef CONFIG_PAX_PER_CPU_PGD
32004 spin_unlock(pgt_lock);
32005+#endif
32006
32007 if (!ret)
32008 break;
32009@@ -281,6 +345,12 @@ static noinline int vmalloc_fault(unsigned long address)
32010 * an interrupt in the middle of a task switch..
32011 */
32012 pgd_paddr = read_cr3();
32013+
32014+#ifdef CONFIG_PAX_PER_CPU_PGD
32015+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
32016+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
32017+#endif
32018+
32019 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
32020 if (!pmd_k)
32021 return -1;
32022@@ -377,11 +447,25 @@ static noinline int vmalloc_fault(unsigned long address)
32023 * happen within a race in page table update. In the later
32024 * case just flush:
32025 */
32026- pgd = pgd_offset(current->active_mm, address);
32027+
32028 pgd_ref = pgd_offset_k(address);
32029 if (pgd_none(*pgd_ref))
32030 return -1;
32031
32032+#ifdef CONFIG_PAX_PER_CPU_PGD
32033+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
32034+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
32035+ if (pgd_none(*pgd)) {
32036+ set_pgd(pgd, *pgd_ref);
32037+ arch_flush_lazy_mmu_mode();
32038+ } else {
32039+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
32040+ }
32041+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
32042+#else
32043+ pgd = pgd_offset(current->active_mm, address);
32044+#endif
32045+
32046 if (pgd_none(*pgd)) {
32047 set_pgd(pgd, *pgd_ref);
32048 arch_flush_lazy_mmu_mode();
32049@@ -548,7 +632,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
32050 static int is_errata100(struct pt_regs *regs, unsigned long address)
32051 {
32052 #ifdef CONFIG_X86_64
32053- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
32054+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
32055 return 1;
32056 #endif
32057 return 0;
32058@@ -575,9 +659,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
32059 }
32060
32061 static const char nx_warning[] = KERN_CRIT
32062-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
32063+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
32064 static const char smep_warning[] = KERN_CRIT
32065-"unable to execute userspace code (SMEP?) (uid: %d)\n";
32066+"unable to execute userspace code (SMEP?) (uid: %d, task: %s, pid: %d)\n";
32067
32068 static void
32069 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32070@@ -586,7 +670,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32071 if (!oops_may_print())
32072 return;
32073
32074- if (error_code & PF_INSTR) {
32075+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
32076 unsigned int level;
32077 pgd_t *pgd;
32078 pte_t *pte;
32079@@ -597,13 +681,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32080 pte = lookup_address_in_pgd(pgd, address, &level);
32081
32082 if (pte && pte_present(*pte) && !pte_exec(*pte))
32083- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
32084+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
32085 if (pte && pte_present(*pte) && pte_exec(*pte) &&
32086 (pgd_flags(*pgd) & _PAGE_USER) &&
32087 (__read_cr4() & X86_CR4_SMEP))
32088- printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
32089+ printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
32090 }
32091
32092+#ifdef CONFIG_PAX_KERNEXEC
32093+ if (init_mm.start_code <= address && address < init_mm.end_code) {
32094+ if (current->signal->curr_ip)
32095+ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
32096+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
32097+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32098+ else
32099+ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
32100+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32101+ }
32102+#endif
32103+
32104 printk(KERN_ALERT "BUG: unable to handle kernel ");
32105 if (address < PAGE_SIZE)
32106 printk(KERN_CONT "NULL pointer dereference");
32107@@ -782,6 +878,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
32108 return;
32109 }
32110 #endif
32111+
32112+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32113+ if (pax_is_fetch_fault(regs, error_code, address)) {
32114+
32115+#ifdef CONFIG_PAX_EMUTRAMP
32116+ switch (pax_handle_fetch_fault(regs)) {
32117+ case 2:
32118+ return;
32119+ }
32120+#endif
32121+
32122+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32123+ do_group_exit(SIGKILL);
32124+ }
32125+#endif
32126+
32127 /* Kernel addresses are always protection faults: */
32128 if (address >= TASK_SIZE)
32129 error_code |= PF_PROT;
32130@@ -864,7 +976,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
32131 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
32132 printk(KERN_ERR
32133 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
32134- tsk->comm, tsk->pid, address);
32135+ tsk->comm, task_pid_nr(tsk), address);
32136 code = BUS_MCEERR_AR;
32137 }
32138 #endif
32139@@ -916,6 +1028,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
32140 return 1;
32141 }
32142
32143+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32144+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
32145+{
32146+ pte_t *pte;
32147+ pmd_t *pmd;
32148+ spinlock_t *ptl;
32149+ unsigned char pte_mask;
32150+
32151+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
32152+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
32153+ return 0;
32154+
32155+ /* PaX: it's our fault, let's handle it if we can */
32156+
32157+ /* PaX: take a look at read faults before acquiring any locks */
32158+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
32159+ /* instruction fetch attempt from a protected page in user mode */
32160+ up_read(&mm->mmap_sem);
32161+
32162+#ifdef CONFIG_PAX_EMUTRAMP
32163+ switch (pax_handle_fetch_fault(regs)) {
32164+ case 2:
32165+ return 1;
32166+ }
32167+#endif
32168+
32169+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32170+ do_group_exit(SIGKILL);
32171+ }
32172+
32173+ pmd = pax_get_pmd(mm, address);
32174+ if (unlikely(!pmd))
32175+ return 0;
32176+
32177+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
32178+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
32179+ pte_unmap_unlock(pte, ptl);
32180+ return 0;
32181+ }
32182+
32183+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
32184+ /* write attempt to a protected page in user mode */
32185+ pte_unmap_unlock(pte, ptl);
32186+ return 0;
32187+ }
32188+
32189+#ifdef CONFIG_SMP
32190+ if (likely(address > get_limit(regs->cs) && cpumask_test_cpu(smp_processor_id(), &mm->context.cpu_user_cs_mask)))
32191+#else
32192+ if (likely(address > get_limit(regs->cs)))
32193+#endif
32194+ {
32195+ set_pte(pte, pte_mkread(*pte));
32196+ __flush_tlb_one(address);
32197+ pte_unmap_unlock(pte, ptl);
32198+ up_read(&mm->mmap_sem);
32199+ return 1;
32200+ }
32201+
32202+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
32203+
32204+ /*
32205+ * PaX: fill DTLB with user rights and retry
32206+ */
32207+ __asm__ __volatile__ (
32208+ "orb %2,(%1)\n"
32209+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
32210+/*
32211+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
32212+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
32213+ * page fault when examined during a TLB load attempt. this is true not only
32214+ * for PTEs holding a non-present entry but also present entries that will
32215+ * raise a page fault (such as those set up by PaX, or the copy-on-write
32216+ * mechanism). in effect it means that we do *not* need to flush the TLBs
32217+ * for our target pages since their PTEs are simply not in the TLBs at all.
32218+
32219+ * the best thing in omitting it is that we gain around 15-20% speed in the
32220+ * fast path of the page fault handler and can get rid of tracing since we
32221+ * can no longer flush unintended entries.
32222+ */
32223+ "invlpg (%0)\n"
32224+#endif
32225+ __copyuser_seg"testb $0,(%0)\n"
32226+ "xorb %3,(%1)\n"
32227+ :
32228+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32229+ : "memory", "cc");
32230+ pte_unmap_unlock(pte, ptl);
32231+ up_read(&mm->mmap_sem);
32232+ return 1;
32233+}
32234+#endif
32235+
32236 /*
32237 * Handle a spurious fault caused by a stale TLB entry.
32238 *
32239@@ -1001,6 +1206,9 @@ int show_unhandled_signals = 1;
32240 static inline int
32241 access_error(unsigned long error_code, struct vm_area_struct *vma)
32242 {
32243+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32244+ return 1;
32245+
32246 if (error_code & PF_WRITE) {
32247 /* write, present and write, not present: */
32248 if (unlikely(!(vma->vm_flags & VM_WRITE)))
32249@@ -1035,7 +1243,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
32250 if (error_code & PF_USER)
32251 return false;
32252
32253- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
32254+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
32255 return false;
32256
32257 return true;
32258@@ -1063,6 +1271,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32259 tsk = current;
32260 mm = tsk->mm;
32261
32262+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32263+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32264+ if (!search_exception_tables(regs->ip)) {
32265+ printk(KERN_EMERG "PAX: please report this to pageexec@freemail.hu\n");
32266+ bad_area_nosemaphore(regs, error_code, address);
32267+ return;
32268+ }
32269+ if (address < pax_user_shadow_base) {
32270+ printk(KERN_EMERG "PAX: please report this to pageexec@freemail.hu\n");
32271+ printk(KERN_EMERG "PAX: faulting IP: %pS\n", (void *)regs->ip);
32272+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_EMERG);
32273+ } else
32274+ address -= pax_user_shadow_base;
32275+ }
32276+#endif
32277+
32278 /*
32279 * Detect and handle instructions that would cause a page fault for
32280 * both a tracked kernel page and a userspace page.
32281@@ -1140,7 +1364,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32282 * User-mode registers count as a user access even for any
32283 * potential system fault or CPU buglet:
32284 */
32285- if (user_mode_vm(regs)) {
32286+ if (user_mode(regs)) {
32287 local_irq_enable();
32288 error_code |= PF_USER;
32289 flags |= FAULT_FLAG_USER;
32290@@ -1187,6 +1411,11 @@ retry:
32291 might_sleep();
32292 }
32293
32294+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32295+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32296+ return;
32297+#endif
32298+
32299 vma = find_vma(mm, address);
32300 if (unlikely(!vma)) {
32301 bad_area(regs, error_code, address);
32302@@ -1198,18 +1427,24 @@ retry:
32303 bad_area(regs, error_code, address);
32304 return;
32305 }
32306- if (error_code & PF_USER) {
32307- /*
32308- * Accessing the stack below %sp is always a bug.
32309- * The large cushion allows instructions like enter
32310- * and pusha to work. ("enter $65535, $31" pushes
32311- * 32 pointers and then decrements %sp by 65535.)
32312- */
32313- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32314- bad_area(regs, error_code, address);
32315- return;
32316- }
32317+ /*
32318+ * Accessing the stack below %sp is always a bug.
32319+ * The large cushion allows instructions like enter
32320+ * and pusha to work. ("enter $65535, $31" pushes
32321+ * 32 pointers and then decrements %sp by 65535.)
32322+ */
32323+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32324+ bad_area(regs, error_code, address);
32325+ return;
32326 }
32327+
32328+#ifdef CONFIG_PAX_SEGMEXEC
32329+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32330+ bad_area(regs, error_code, address);
32331+ return;
32332+ }
32333+#endif
32334+
32335 if (unlikely(expand_stack(vma, address))) {
32336 bad_area(regs, error_code, address);
32337 return;
32338@@ -1329,3 +1564,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32339 }
32340 NOKPROBE_SYMBOL(trace_do_page_fault);
32341 #endif /* CONFIG_TRACING */
32342+
32343+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32344+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32345+{
32346+ struct mm_struct *mm = current->mm;
32347+ unsigned long ip = regs->ip;
32348+
32349+ if (v8086_mode(regs))
32350+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32351+
32352+#ifdef CONFIG_PAX_PAGEEXEC
32353+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32354+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32355+ return true;
32356+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32357+ return true;
32358+ return false;
32359+ }
32360+#endif
32361+
32362+#ifdef CONFIG_PAX_SEGMEXEC
32363+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32364+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32365+ return true;
32366+ return false;
32367+ }
32368+#endif
32369+
32370+ return false;
32371+}
32372+#endif
32373+
32374+#ifdef CONFIG_PAX_EMUTRAMP
32375+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32376+{
32377+ int err;
32378+
32379+ do { /* PaX: libffi trampoline emulation */
32380+ unsigned char mov, jmp;
32381+ unsigned int addr1, addr2;
32382+
32383+#ifdef CONFIG_X86_64
32384+ if ((regs->ip + 9) >> 32)
32385+ break;
32386+#endif
32387+
32388+ err = get_user(mov, (unsigned char __user *)regs->ip);
32389+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32390+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32391+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32392+
32393+ if (err)
32394+ break;
32395+
32396+ if (mov == 0xB8 && jmp == 0xE9) {
32397+ regs->ax = addr1;
32398+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32399+ return 2;
32400+ }
32401+ } while (0);
32402+
32403+ do { /* PaX: gcc trampoline emulation #1 */
32404+ unsigned char mov1, mov2;
32405+ unsigned short jmp;
32406+ unsigned int addr1, addr2;
32407+
32408+#ifdef CONFIG_X86_64
32409+ if ((regs->ip + 11) >> 32)
32410+ break;
32411+#endif
32412+
32413+ err = get_user(mov1, (unsigned char __user *)regs->ip);
32414+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32415+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32416+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32417+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32418+
32419+ if (err)
32420+ break;
32421+
32422+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32423+ regs->cx = addr1;
32424+ regs->ax = addr2;
32425+ regs->ip = addr2;
32426+ return 2;
32427+ }
32428+ } while (0);
32429+
32430+ do { /* PaX: gcc trampoline emulation #2 */
32431+ unsigned char mov, jmp;
32432+ unsigned int addr1, addr2;
32433+
32434+#ifdef CONFIG_X86_64
32435+ if ((regs->ip + 9) >> 32)
32436+ break;
32437+#endif
32438+
32439+ err = get_user(mov, (unsigned char __user *)regs->ip);
32440+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32441+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32442+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32443+
32444+ if (err)
32445+ break;
32446+
32447+ if (mov == 0xB9 && jmp == 0xE9) {
32448+ regs->cx = addr1;
32449+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32450+ return 2;
32451+ }
32452+ } while (0);
32453+
32454+ return 1; /* PaX in action */
32455+}
32456+
32457+#ifdef CONFIG_X86_64
32458+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
32459+{
32460+ int err;
32461+
32462+ do { /* PaX: libffi trampoline emulation */
32463+ unsigned short mov1, mov2, jmp1;
32464+ unsigned char stcclc, jmp2;
32465+ unsigned long addr1, addr2;
32466+
32467+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32468+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32469+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32470+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32471+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
32472+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
32473+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
32474+
32475+ if (err)
32476+ break;
32477+
32478+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32479+ regs->r11 = addr1;
32480+ regs->r10 = addr2;
32481+ if (stcclc == 0xF8)
32482+ regs->flags &= ~X86_EFLAGS_CF;
32483+ else
32484+ regs->flags |= X86_EFLAGS_CF;
32485+ regs->ip = addr1;
32486+ return 2;
32487+ }
32488+ } while (0);
32489+
32490+ do { /* PaX: gcc trampoline emulation #1 */
32491+ unsigned short mov1, mov2, jmp1;
32492+ unsigned char jmp2;
32493+ unsigned int addr1;
32494+ unsigned long addr2;
32495+
32496+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32497+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
32498+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
32499+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
32500+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
32501+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
32502+
32503+ if (err)
32504+ break;
32505+
32506+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32507+ regs->r11 = addr1;
32508+ regs->r10 = addr2;
32509+ regs->ip = addr1;
32510+ return 2;
32511+ }
32512+ } while (0);
32513+
32514+ do { /* PaX: gcc trampoline emulation #2 */
32515+ unsigned short mov1, mov2, jmp1;
32516+ unsigned char jmp2;
32517+ unsigned long addr1, addr2;
32518+
32519+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32520+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32521+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32522+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32523+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
32524+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
32525+
32526+ if (err)
32527+ break;
32528+
32529+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32530+ regs->r11 = addr1;
32531+ regs->r10 = addr2;
32532+ regs->ip = addr1;
32533+ return 2;
32534+ }
32535+ } while (0);
32536+
32537+ return 1; /* PaX in action */
32538+}
32539+#endif
32540+
32541+/*
32542+ * PaX: decide what to do with offenders (regs->ip = fault address)
32543+ *
32544+ * returns 1 when task should be killed
32545+ * 2 when gcc trampoline was detected
32546+ */
32547+static int pax_handle_fetch_fault(struct pt_regs *regs)
32548+{
32549+ if (v8086_mode(regs))
32550+ return 1;
32551+
32552+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
32553+ return 1;
32554+
32555+#ifdef CONFIG_X86_32
32556+ return pax_handle_fetch_fault_32(regs);
32557+#else
32558+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
32559+ return pax_handle_fetch_fault_32(regs);
32560+ else
32561+ return pax_handle_fetch_fault_64(regs);
32562+#endif
32563+}
32564+#endif
32565+
32566+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32567+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
32568+{
32569+ long i;
32570+
32571+ printk(KERN_ERR "PAX: bytes at PC: ");
32572+ for (i = 0; i < 20; i++) {
32573+ unsigned char c;
32574+ if (get_user(c, (unsigned char __force_user *)pc+i))
32575+ printk(KERN_CONT "?? ");
32576+ else
32577+ printk(KERN_CONT "%02x ", c);
32578+ }
32579+ printk("\n");
32580+
32581+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
32582+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
32583+ unsigned long c;
32584+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
32585+#ifdef CONFIG_X86_32
32586+ printk(KERN_CONT "???????? ");
32587+#else
32588+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
32589+ printk(KERN_CONT "???????? ???????? ");
32590+ else
32591+ printk(KERN_CONT "???????????????? ");
32592+#endif
32593+ } else {
32594+#ifdef CONFIG_X86_64
32595+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
32596+ printk(KERN_CONT "%08x ", (unsigned int)c);
32597+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
32598+ } else
32599+#endif
32600+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
32601+ }
32602+ }
32603+ printk("\n");
32604+}
32605+#endif
32606+
32607+/**
32608+ * probe_kernel_write(): safely attempt to write to a location
32609+ * @dst: address to write to
32610+ * @src: pointer to the data that shall be written
32611+ * @size: size of the data chunk
32612+ *
32613+ * Safely write to address @dst from the buffer at @src. If a kernel fault
32614+ * happens, handle that and return -EFAULT.
32615+ */
32616+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
32617+{
32618+ long ret;
32619+ mm_segment_t old_fs = get_fs();
32620+
32621+ set_fs(KERNEL_DS);
32622+ pagefault_disable();
32623+ pax_open_kernel();
32624+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
32625+ pax_close_kernel();
32626+ pagefault_enable();
32627+ set_fs(old_fs);
32628+
32629+ return ret ? -EFAULT : 0;
32630+}
32631diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
32632index 81bf3d2..7ef25c2 100644
32633--- a/arch/x86/mm/gup.c
32634+++ b/arch/x86/mm/gup.c
32635@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
32636 addr = start;
32637 len = (unsigned long) nr_pages << PAGE_SHIFT;
32638 end = start + len;
32639- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
32640+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32641 (void __user *)start, len)))
32642 return 0;
32643
32644@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
32645 goto slow_irqon;
32646 #endif
32647
32648+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32649+ (void __user *)start, len)))
32650+ return 0;
32651+
32652 /*
32653 * XXX: batch / limit 'nr', to avoid large irq off latency
32654 * needs some instrumenting to determine the common sizes used by
32655diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
32656index 4500142..53a363c 100644
32657--- a/arch/x86/mm/highmem_32.c
32658+++ b/arch/x86/mm/highmem_32.c
32659@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
32660 idx = type + KM_TYPE_NR*smp_processor_id();
32661 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32662 BUG_ON(!pte_none(*(kmap_pte-idx)));
32663+
32664+ pax_open_kernel();
32665 set_pte(kmap_pte-idx, mk_pte(page, prot));
32666+ pax_close_kernel();
32667+
32668 arch_flush_lazy_mmu_mode();
32669
32670 return (void *)vaddr;
32671diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
32672index 42982b2..7168fc3 100644
32673--- a/arch/x86/mm/hugetlbpage.c
32674+++ b/arch/x86/mm/hugetlbpage.c
32675@@ -74,23 +74,24 @@ int pud_huge(pud_t pud)
32676 #ifdef CONFIG_HUGETLB_PAGE
32677 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
32678 unsigned long addr, unsigned long len,
32679- unsigned long pgoff, unsigned long flags)
32680+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32681 {
32682 struct hstate *h = hstate_file(file);
32683 struct vm_unmapped_area_info info;
32684-
32685+
32686 info.flags = 0;
32687 info.length = len;
32688 info.low_limit = current->mm->mmap_legacy_base;
32689 info.high_limit = TASK_SIZE;
32690 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32691 info.align_offset = 0;
32692+ info.threadstack_offset = offset;
32693 return vm_unmapped_area(&info);
32694 }
32695
32696 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32697 unsigned long addr0, unsigned long len,
32698- unsigned long pgoff, unsigned long flags)
32699+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32700 {
32701 struct hstate *h = hstate_file(file);
32702 struct vm_unmapped_area_info info;
32703@@ -102,6 +103,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32704 info.high_limit = current->mm->mmap_base;
32705 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32706 info.align_offset = 0;
32707+ info.threadstack_offset = offset;
32708 addr = vm_unmapped_area(&info);
32709
32710 /*
32711@@ -114,6 +116,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32712 VM_BUG_ON(addr != -ENOMEM);
32713 info.flags = 0;
32714 info.low_limit = TASK_UNMAPPED_BASE;
32715+
32716+#ifdef CONFIG_PAX_RANDMMAP
32717+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32718+ info.low_limit += current->mm->delta_mmap;
32719+#endif
32720+
32721 info.high_limit = TASK_SIZE;
32722 addr = vm_unmapped_area(&info);
32723 }
32724@@ -128,10 +136,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32725 struct hstate *h = hstate_file(file);
32726 struct mm_struct *mm = current->mm;
32727 struct vm_area_struct *vma;
32728+ unsigned long pax_task_size = TASK_SIZE;
32729+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32730
32731 if (len & ~huge_page_mask(h))
32732 return -EINVAL;
32733- if (len > TASK_SIZE)
32734+
32735+#ifdef CONFIG_PAX_SEGMEXEC
32736+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32737+ pax_task_size = SEGMEXEC_TASK_SIZE;
32738+#endif
32739+
32740+ pax_task_size -= PAGE_SIZE;
32741+
32742+ if (len > pax_task_size)
32743 return -ENOMEM;
32744
32745 if (flags & MAP_FIXED) {
32746@@ -140,19 +158,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32747 return addr;
32748 }
32749
32750+#ifdef CONFIG_PAX_RANDMMAP
32751+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32752+#endif
32753+
32754 if (addr) {
32755 addr = ALIGN(addr, huge_page_size(h));
32756 vma = find_vma(mm, addr);
32757- if (TASK_SIZE - len >= addr &&
32758- (!vma || addr + len <= vma->vm_start))
32759+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32760 return addr;
32761 }
32762 if (mm->get_unmapped_area == arch_get_unmapped_area)
32763 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32764- pgoff, flags);
32765+ pgoff, flags, offset);
32766 else
32767 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32768- pgoff, flags);
32769+ pgoff, flags, offset);
32770 }
32771 #endif /* CONFIG_HUGETLB_PAGE */
32772
32773diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32774index a110efc..a31a18f 100644
32775--- a/arch/x86/mm/init.c
32776+++ b/arch/x86/mm/init.c
32777@@ -4,6 +4,7 @@
32778 #include <linux/swap.h>
32779 #include <linux/memblock.h>
32780 #include <linux/bootmem.h> /* for max_low_pfn */
32781+#include <linux/tboot.h>
32782
32783 #include <asm/cacheflush.h>
32784 #include <asm/e820.h>
32785@@ -17,6 +18,8 @@
32786 #include <asm/proto.h>
32787 #include <asm/dma.h> /* for MAX_DMA_PFN */
32788 #include <asm/microcode.h>
32789+#include <asm/desc.h>
32790+#include <asm/bios_ebda.h>
32791
32792 /*
32793 * We need to define the tracepoints somewhere, and tlb.c
32794@@ -620,7 +623,18 @@ void __init init_mem_mapping(void)
32795 early_ioremap_page_table_range_init();
32796 #endif
32797
32798+#ifdef CONFIG_PAX_PER_CPU_PGD
32799+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32800+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32801+ KERNEL_PGD_PTRS);
32802+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32803+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32804+ KERNEL_PGD_PTRS);
32805+ load_cr3(get_cpu_pgd(0, kernel));
32806+#else
32807 load_cr3(swapper_pg_dir);
32808+#endif
32809+
32810 __flush_tlb_all();
32811
32812 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32813@@ -636,10 +650,40 @@ void __init init_mem_mapping(void)
32814 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
32815 * mmio resources as well as potential bios/acpi data regions.
32816 */
32817+
32818+#ifdef CONFIG_GRKERNSEC_KMEM
32819+static unsigned int ebda_start __read_only;
32820+static unsigned int ebda_end __read_only;
32821+#endif
32822+
32823 int devmem_is_allowed(unsigned long pagenr)
32824 {
32825- if (pagenr < 256)
32826+#ifdef CONFIG_GRKERNSEC_KMEM
32827+ /* allow BDA */
32828+ if (!pagenr)
32829 return 1;
32830+ /* allow EBDA */
32831+ if (pagenr >= ebda_start && pagenr < ebda_end)
32832+ return 1;
32833+ /* if tboot is in use, allow access to its hardcoded serial log range */
32834+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32835+ return 1;
32836+#else
32837+ if (!pagenr)
32838+ return 1;
32839+#ifdef CONFIG_VM86
32840+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32841+ return 1;
32842+#endif
32843+#endif
32844+
32845+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32846+ return 1;
32847+#ifdef CONFIG_GRKERNSEC_KMEM
32848+ /* throw out everything else below 1MB */
32849+ if (pagenr <= 256)
32850+ return 0;
32851+#endif
32852 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32853 return 0;
32854 if (!page_is_ram(pagenr))
32855@@ -685,8 +729,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
32856 #endif
32857 }
32858
32859+#ifdef CONFIG_GRKERNSEC_KMEM
32860+static inline void gr_init_ebda(void)
32861+{
32862+ unsigned int ebda_addr;
32863+ unsigned int ebda_size = 0;
32864+
32865+ ebda_addr = get_bios_ebda();
32866+ if (ebda_addr) {
32867+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
32868+ ebda_size <<= 10;
32869+ }
32870+ if (ebda_addr && ebda_size) {
32871+ ebda_start = ebda_addr >> PAGE_SHIFT;
32872+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
32873+ } else {
32874+ ebda_start = 0x9f000 >> PAGE_SHIFT;
32875+ ebda_end = 0xa0000 >> PAGE_SHIFT;
32876+ }
32877+}
32878+#else
32879+static inline void gr_init_ebda(void) { }
32880+#endif
32881+
32882 void free_initmem(void)
32883 {
32884+#ifdef CONFIG_PAX_KERNEXEC
32885+#ifdef CONFIG_X86_32
32886+ /* PaX: limit KERNEL_CS to actual size */
32887+ unsigned long addr, limit;
32888+ struct desc_struct d;
32889+ int cpu;
32890+#else
32891+ pgd_t *pgd;
32892+ pud_t *pud;
32893+ pmd_t *pmd;
32894+ unsigned long addr, end;
32895+#endif
32896+#endif
32897+
32898+ gr_init_ebda();
32899+
32900+#ifdef CONFIG_PAX_KERNEXEC
32901+#ifdef CONFIG_X86_32
32902+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
32903+ limit = (limit - 1UL) >> PAGE_SHIFT;
32904+
32905+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
32906+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32907+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
32908+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
32909+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
32910+ }
32911+
32912+ /* PaX: make KERNEL_CS read-only */
32913+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
32914+ if (!paravirt_enabled())
32915+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
32916+/*
32917+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
32918+ pgd = pgd_offset_k(addr);
32919+ pud = pud_offset(pgd, addr);
32920+ pmd = pmd_offset(pud, addr);
32921+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32922+ }
32923+*/
32924+#ifdef CONFIG_X86_PAE
32925+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
32926+/*
32927+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
32928+ pgd = pgd_offset_k(addr);
32929+ pud = pud_offset(pgd, addr);
32930+ pmd = pmd_offset(pud, addr);
32931+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32932+ }
32933+*/
32934+#endif
32935+
32936+#ifdef CONFIG_MODULES
32937+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
32938+#endif
32939+
32940+#else
32941+ /* PaX: make kernel code/rodata read-only, rest non-executable */
32942+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
32943+ pgd = pgd_offset_k(addr);
32944+ pud = pud_offset(pgd, addr);
32945+ pmd = pmd_offset(pud, addr);
32946+ if (!pmd_present(*pmd))
32947+ continue;
32948+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
32949+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32950+ else
32951+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32952+ }
32953+
32954+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
32955+ end = addr + KERNEL_IMAGE_SIZE;
32956+ for (; addr < end; addr += PMD_SIZE) {
32957+ pgd = pgd_offset_k(addr);
32958+ pud = pud_offset(pgd, addr);
32959+ pmd = pmd_offset(pud, addr);
32960+ if (!pmd_present(*pmd))
32961+ continue;
32962+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
32963+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32964+ }
32965+#endif
32966+
32967+ flush_tlb_all();
32968+#endif
32969+
32970 free_init_pages("unused kernel",
32971 (unsigned long)(&__init_begin),
32972 (unsigned long)(&__init_end));
32973diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
32974index c8140e1..59257fc 100644
32975--- a/arch/x86/mm/init_32.c
32976+++ b/arch/x86/mm/init_32.c
32977@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
32978 bool __read_mostly __vmalloc_start_set = false;
32979
32980 /*
32981- * Creates a middle page table and puts a pointer to it in the
32982- * given global directory entry. This only returns the gd entry
32983- * in non-PAE compilation mode, since the middle layer is folded.
32984- */
32985-static pmd_t * __init one_md_table_init(pgd_t *pgd)
32986-{
32987- pud_t *pud;
32988- pmd_t *pmd_table;
32989-
32990-#ifdef CONFIG_X86_PAE
32991- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
32992- pmd_table = (pmd_t *)alloc_low_page();
32993- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
32994- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
32995- pud = pud_offset(pgd, 0);
32996- BUG_ON(pmd_table != pmd_offset(pud, 0));
32997-
32998- return pmd_table;
32999- }
33000-#endif
33001- pud = pud_offset(pgd, 0);
33002- pmd_table = pmd_offset(pud, 0);
33003-
33004- return pmd_table;
33005-}
33006-
33007-/*
33008 * Create a page table and place a pointer to it in a middle page
33009 * directory entry:
33010 */
33011@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
33012 pte_t *page_table = (pte_t *)alloc_low_page();
33013
33014 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
33015+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
33016+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
33017+#else
33018 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
33019+#endif
33020 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
33021 }
33022
33023 return pte_offset_kernel(pmd, 0);
33024 }
33025
33026+static pmd_t * __init one_md_table_init(pgd_t *pgd)
33027+{
33028+ pud_t *pud;
33029+ pmd_t *pmd_table;
33030+
33031+ pud = pud_offset(pgd, 0);
33032+ pmd_table = pmd_offset(pud, 0);
33033+
33034+ return pmd_table;
33035+}
33036+
33037 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
33038 {
33039 int pgd_idx = pgd_index(vaddr);
33040@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33041 int pgd_idx, pmd_idx;
33042 unsigned long vaddr;
33043 pgd_t *pgd;
33044+ pud_t *pud;
33045 pmd_t *pmd;
33046 pte_t *pte = NULL;
33047 unsigned long count = page_table_range_init_count(start, end);
33048@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33049 pgd = pgd_base + pgd_idx;
33050
33051 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
33052- pmd = one_md_table_init(pgd);
33053- pmd = pmd + pmd_index(vaddr);
33054+ pud = pud_offset(pgd, vaddr);
33055+ pmd = pmd_offset(pud, vaddr);
33056+
33057+#ifdef CONFIG_X86_PAE
33058+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33059+#endif
33060+
33061 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
33062 pmd++, pmd_idx++) {
33063 pte = page_table_kmap_check(one_page_table_init(pmd),
33064@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33065 }
33066 }
33067
33068-static inline int is_kernel_text(unsigned long addr)
33069+static inline int is_kernel_text(unsigned long start, unsigned long end)
33070 {
33071- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
33072- return 1;
33073- return 0;
33074+ if ((start >= ktla_ktva((unsigned long)_etext) ||
33075+ end <= ktla_ktva((unsigned long)_stext)) &&
33076+ (start >= ktla_ktva((unsigned long)_einittext) ||
33077+ end <= ktla_ktva((unsigned long)_sinittext)) &&
33078+
33079+#ifdef CONFIG_ACPI_SLEEP
33080+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
33081+#endif
33082+
33083+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
33084+ return 0;
33085+ return 1;
33086 }
33087
33088 /*
33089@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
33090 unsigned long last_map_addr = end;
33091 unsigned long start_pfn, end_pfn;
33092 pgd_t *pgd_base = swapper_pg_dir;
33093- int pgd_idx, pmd_idx, pte_ofs;
33094+ unsigned int pgd_idx, pmd_idx, pte_ofs;
33095 unsigned long pfn;
33096 pgd_t *pgd;
33097+ pud_t *pud;
33098 pmd_t *pmd;
33099 pte_t *pte;
33100 unsigned pages_2m, pages_4k;
33101@@ -291,8 +295,13 @@ repeat:
33102 pfn = start_pfn;
33103 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33104 pgd = pgd_base + pgd_idx;
33105- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
33106- pmd = one_md_table_init(pgd);
33107+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
33108+ pud = pud_offset(pgd, 0);
33109+ pmd = pmd_offset(pud, 0);
33110+
33111+#ifdef CONFIG_X86_PAE
33112+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33113+#endif
33114
33115 if (pfn >= end_pfn)
33116 continue;
33117@@ -304,14 +313,13 @@ repeat:
33118 #endif
33119 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
33120 pmd++, pmd_idx++) {
33121- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
33122+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
33123
33124 /*
33125 * Map with big pages if possible, otherwise
33126 * create normal page tables:
33127 */
33128 if (use_pse) {
33129- unsigned int addr2;
33130 pgprot_t prot = PAGE_KERNEL_LARGE;
33131 /*
33132 * first pass will use the same initial
33133@@ -322,11 +330,7 @@ repeat:
33134 _PAGE_PSE);
33135
33136 pfn &= PMD_MASK >> PAGE_SHIFT;
33137- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
33138- PAGE_OFFSET + PAGE_SIZE-1;
33139-
33140- if (is_kernel_text(addr) ||
33141- is_kernel_text(addr2))
33142+ if (is_kernel_text(address, address + PMD_SIZE))
33143 prot = PAGE_KERNEL_LARGE_EXEC;
33144
33145 pages_2m++;
33146@@ -343,7 +347,7 @@ repeat:
33147 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33148 pte += pte_ofs;
33149 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
33150- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
33151+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
33152 pgprot_t prot = PAGE_KERNEL;
33153 /*
33154 * first pass will use the same initial
33155@@ -351,7 +355,7 @@ repeat:
33156 */
33157 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
33158
33159- if (is_kernel_text(addr))
33160+ if (is_kernel_text(address, address + PAGE_SIZE))
33161 prot = PAGE_KERNEL_EXEC;
33162
33163 pages_4k++;
33164@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
33165
33166 pud = pud_offset(pgd, va);
33167 pmd = pmd_offset(pud, va);
33168- if (!pmd_present(*pmd))
33169+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
33170 break;
33171
33172 /* should not be large page here */
33173@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
33174
33175 static void __init pagetable_init(void)
33176 {
33177- pgd_t *pgd_base = swapper_pg_dir;
33178-
33179- permanent_kmaps_init(pgd_base);
33180+ permanent_kmaps_init(swapper_pg_dir);
33181 }
33182
33183-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
33184+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL);
33185 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33186
33187 /* user-defined highmem size */
33188@@ -787,10 +789,10 @@ void __init mem_init(void)
33189 ((unsigned long)&__init_end -
33190 (unsigned long)&__init_begin) >> 10,
33191
33192- (unsigned long)&_etext, (unsigned long)&_edata,
33193- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
33194+ (unsigned long)&_sdata, (unsigned long)&_edata,
33195+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
33196
33197- (unsigned long)&_text, (unsigned long)&_etext,
33198+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
33199 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
33200
33201 /*
33202@@ -884,6 +886,7 @@ void set_kernel_text_rw(void)
33203 if (!kernel_set_to_readonly)
33204 return;
33205
33206+ start = ktla_ktva(start);
33207 pr_debug("Set kernel text: %lx - %lx for read write\n",
33208 start, start+size);
33209
33210@@ -898,6 +901,7 @@ void set_kernel_text_ro(void)
33211 if (!kernel_set_to_readonly)
33212 return;
33213
33214+ start = ktla_ktva(start);
33215 pr_debug("Set kernel text: %lx - %lx for read only\n",
33216 start, start+size);
33217
33218@@ -926,6 +930,7 @@ void mark_rodata_ro(void)
33219 unsigned long start = PFN_ALIGN(_text);
33220 unsigned long size = PFN_ALIGN(_etext) - start;
33221
33222+ start = ktla_ktva(start);
33223 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33224 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33225 size >> 10);
33226diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33227index 30eb05a..ae671ac 100644
33228--- a/arch/x86/mm/init_64.c
33229+++ b/arch/x86/mm/init_64.c
33230@@ -150,7 +150,7 @@ early_param("gbpages", parse_direct_gbpages_on);
33231 * around without checking the pgd every time.
33232 */
33233
33234-pteval_t __supported_pte_mask __read_mostly = ~0;
33235+pteval_t __supported_pte_mask __read_only = ~_PAGE_NX;
33236 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33237
33238 int force_personality32;
33239@@ -183,7 +183,12 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33240
33241 for (address = start; address <= end; address += PGDIR_SIZE) {
33242 const pgd_t *pgd_ref = pgd_offset_k(address);
33243+
33244+#ifdef CONFIG_PAX_PER_CPU_PGD
33245+ unsigned long cpu;
33246+#else
33247 struct page *page;
33248+#endif
33249
33250 /*
33251 * When it is called after memory hot remove, pgd_none()
33252@@ -194,6 +199,25 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33253 continue;
33254
33255 spin_lock(&pgd_lock);
33256+
33257+#ifdef CONFIG_PAX_PER_CPU_PGD
33258+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33259+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33260+
33261+ if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33262+ BUG_ON(pgd_page_vaddr(*pgd)
33263+ != pgd_page_vaddr(*pgd_ref));
33264+
33265+ if (removed) {
33266+ if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
33267+ pgd_clear(pgd);
33268+ } else {
33269+ if (pgd_none(*pgd))
33270+ set_pgd(pgd, *pgd_ref);
33271+ }
33272+
33273+ pgd = pgd_offset_cpu(cpu, kernel, address);
33274+#else
33275 list_for_each_entry(page, &pgd_list, lru) {
33276 pgd_t *pgd;
33277 spinlock_t *pgt_lock;
33278@@ -202,6 +226,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33279 /* the pgt_lock only for Xen */
33280 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33281 spin_lock(pgt_lock);
33282+#endif
33283
33284 if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33285 BUG_ON(pgd_page_vaddr(*pgd)
33286@@ -215,7 +240,10 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33287 set_pgd(pgd, *pgd_ref);
33288 }
33289
33290+#ifndef CONFIG_PAX_PER_CPU_PGD
33291 spin_unlock(pgt_lock);
33292+#endif
33293+
33294 }
33295 spin_unlock(&pgd_lock);
33296 }
33297@@ -248,7 +276,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33298 {
33299 if (pgd_none(*pgd)) {
33300 pud_t *pud = (pud_t *)spp_getpage();
33301- pgd_populate(&init_mm, pgd, pud);
33302+ pgd_populate_kernel(&init_mm, pgd, pud);
33303 if (pud != pud_offset(pgd, 0))
33304 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33305 pud, pud_offset(pgd, 0));
33306@@ -260,7 +288,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33307 {
33308 if (pud_none(*pud)) {
33309 pmd_t *pmd = (pmd_t *) spp_getpage();
33310- pud_populate(&init_mm, pud, pmd);
33311+ pud_populate_kernel(&init_mm, pud, pmd);
33312 if (pmd != pmd_offset(pud, 0))
33313 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33314 pmd, pmd_offset(pud, 0));
33315@@ -289,7 +317,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33316 pmd = fill_pmd(pud, vaddr);
33317 pte = fill_pte(pmd, vaddr);
33318
33319+ pax_open_kernel();
33320 set_pte(pte, new_pte);
33321+ pax_close_kernel();
33322
33323 /*
33324 * It's enough to flush this one mapping.
33325@@ -351,14 +381,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33326 pgd = pgd_offset_k((unsigned long)__va(phys));
33327 if (pgd_none(*pgd)) {
33328 pud = (pud_t *) spp_getpage();
33329- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33330- _PAGE_USER));
33331+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33332 }
33333 pud = pud_offset(pgd, (unsigned long)__va(phys));
33334 if (pud_none(*pud)) {
33335 pmd = (pmd_t *) spp_getpage();
33336- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33337- _PAGE_USER));
33338+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33339 }
33340 pmd = pmd_offset(pud, phys);
33341 BUG_ON(!pmd_none(*pmd));
33342@@ -599,7 +627,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33343 prot);
33344
33345 spin_lock(&init_mm.page_table_lock);
33346- pud_populate(&init_mm, pud, pmd);
33347+ pud_populate_kernel(&init_mm, pud, pmd);
33348 spin_unlock(&init_mm.page_table_lock);
33349 }
33350 __flush_tlb_all();
33351@@ -640,7 +668,7 @@ kernel_physical_mapping_init(unsigned long start,
33352 page_size_mask);
33353
33354 spin_lock(&init_mm.page_table_lock);
33355- pgd_populate(&init_mm, pgd, pud);
33356+ pgd_populate_kernel(&init_mm, pgd, pud);
33357 spin_unlock(&init_mm.page_table_lock);
33358 pgd_changed = true;
33359 }
33360diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33361index 9ca35fc..4b2b7b7 100644
33362--- a/arch/x86/mm/iomap_32.c
33363+++ b/arch/x86/mm/iomap_32.c
33364@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33365 type = kmap_atomic_idx_push();
33366 idx = type + KM_TYPE_NR * smp_processor_id();
33367 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33368+
33369+ pax_open_kernel();
33370 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33371+ pax_close_kernel();
33372+
33373 arch_flush_lazy_mmu_mode();
33374
33375 return (void *)vaddr;
33376diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33377index fdf617c..b9e85bc 100644
33378--- a/arch/x86/mm/ioremap.c
33379+++ b/arch/x86/mm/ioremap.c
33380@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
33381 unsigned long i;
33382
33383 for (i = 0; i < nr_pages; ++i)
33384- if (pfn_valid(start_pfn + i) &&
33385- !PageReserved(pfn_to_page(start_pfn + i)))
33386+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
33387+ !PageReserved(pfn_to_page(start_pfn + i))))
33388 return 1;
33389
33390 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
33391@@ -283,7 +283,7 @@ EXPORT_SYMBOL(ioremap_prot);
33392 *
33393 * Caller must ensure there is only one unmapping for the same pointer.
33394 */
33395-void iounmap(volatile void __iomem *addr)
33396+void iounmap(const volatile void __iomem *addr)
33397 {
33398 struct vm_struct *p, *o;
33399
33400@@ -332,30 +332,29 @@ EXPORT_SYMBOL(iounmap);
33401 */
33402 void *xlate_dev_mem_ptr(phys_addr_t phys)
33403 {
33404- void *addr;
33405- unsigned long start = phys & PAGE_MASK;
33406-
33407 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33408- if (page_is_ram(start >> PAGE_SHIFT))
33409+ if (page_is_ram(phys >> PAGE_SHIFT))
33410+#ifdef CONFIG_HIGHMEM
33411+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33412+#endif
33413 return __va(phys);
33414
33415- addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
33416- if (addr)
33417- addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
33418-
33419- return addr;
33420+ return (void __force *)ioremap_cache(phys, PAGE_SIZE);
33421 }
33422
33423 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
33424 {
33425 if (page_is_ram(phys >> PAGE_SHIFT))
33426+#ifdef CONFIG_HIGHMEM
33427+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33428+#endif
33429 return;
33430
33431 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33432 return;
33433 }
33434
33435-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33436+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33437
33438 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33439 {
33440@@ -391,8 +390,7 @@ void __init early_ioremap_init(void)
33441 early_ioremap_setup();
33442
33443 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33444- memset(bm_pte, 0, sizeof(bm_pte));
33445- pmd_populate_kernel(&init_mm, pmd, bm_pte);
33446+ pmd_populate_user(&init_mm, pmd, bm_pte);
33447
33448 /*
33449 * The boot-ioremap range spans multiple pmds, for which
33450diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33451index b4f2e7e..96c9c3e 100644
33452--- a/arch/x86/mm/kmemcheck/kmemcheck.c
33453+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33454@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33455 * memory (e.g. tracked pages)? For now, we need this to avoid
33456 * invoking kmemcheck for PnP BIOS calls.
33457 */
33458- if (regs->flags & X86_VM_MASK)
33459+ if (v8086_mode(regs))
33460 return false;
33461- if (regs->cs != __KERNEL_CS)
33462+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
33463 return false;
33464
33465 pte = kmemcheck_pte_lookup(address);
33466diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
33467index df4552b..12c129c 100644
33468--- a/arch/x86/mm/mmap.c
33469+++ b/arch/x86/mm/mmap.c
33470@@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void)
33471 * Leave an at least ~128 MB hole with possible stack randomization.
33472 */
33473 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
33474-#define MAX_GAP (TASK_SIZE/6*5)
33475+#define MAX_GAP (pax_task_size/6*5)
33476
33477 static int mmap_is_legacy(void)
33478 {
33479@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
33480 return rnd << PAGE_SHIFT;
33481 }
33482
33483-static unsigned long mmap_base(void)
33484+static unsigned long mmap_base(struct mm_struct *mm)
33485 {
33486 unsigned long gap = rlimit(RLIMIT_STACK);
33487+ unsigned long pax_task_size = TASK_SIZE;
33488+
33489+#ifdef CONFIG_PAX_SEGMEXEC
33490+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33491+ pax_task_size = SEGMEXEC_TASK_SIZE;
33492+#endif
33493
33494 if (gap < MIN_GAP)
33495 gap = MIN_GAP;
33496 else if (gap > MAX_GAP)
33497 gap = MAX_GAP;
33498
33499- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
33500+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
33501 }
33502
33503 /*
33504 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
33505 * does, but not when emulating X86_32
33506 */
33507-static unsigned long mmap_legacy_base(void)
33508+static unsigned long mmap_legacy_base(struct mm_struct *mm)
33509 {
33510- if (mmap_is_ia32())
33511+ if (mmap_is_ia32()) {
33512+
33513+#ifdef CONFIG_PAX_SEGMEXEC
33514+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33515+ return SEGMEXEC_TASK_UNMAPPED_BASE;
33516+ else
33517+#endif
33518+
33519 return TASK_UNMAPPED_BASE;
33520- else
33521+ } else
33522 return TASK_UNMAPPED_BASE + mmap_rnd();
33523 }
33524
33525@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
33526 */
33527 void arch_pick_mmap_layout(struct mm_struct *mm)
33528 {
33529- mm->mmap_legacy_base = mmap_legacy_base();
33530- mm->mmap_base = mmap_base();
33531+ mm->mmap_legacy_base = mmap_legacy_base(mm);
33532+ mm->mmap_base = mmap_base(mm);
33533+
33534+#ifdef CONFIG_PAX_RANDMMAP
33535+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
33536+ mm->mmap_legacy_base += mm->delta_mmap;
33537+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
33538+ }
33539+#endif
33540
33541 if (mmap_is_legacy()) {
33542 mm->mmap_base = mm->mmap_legacy_base;
33543diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
33544index 0057a7a..95c7edd 100644
33545--- a/arch/x86/mm/mmio-mod.c
33546+++ b/arch/x86/mm/mmio-mod.c
33547@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
33548 break;
33549 default:
33550 {
33551- unsigned char *ip = (unsigned char *)instptr;
33552+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
33553 my_trace->opcode = MMIO_UNKNOWN_OP;
33554 my_trace->width = 0;
33555 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
33556@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
33557 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33558 void __iomem *addr)
33559 {
33560- static atomic_t next_id;
33561+ static atomic_unchecked_t next_id;
33562 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
33563 /* These are page-unaligned. */
33564 struct mmiotrace_map map = {
33565@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33566 .private = trace
33567 },
33568 .phys = offset,
33569- .id = atomic_inc_return(&next_id)
33570+ .id = atomic_inc_return_unchecked(&next_id)
33571 };
33572 map.map_id = trace->id;
33573
33574@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
33575 ioremap_trace_core(offset, size, addr);
33576 }
33577
33578-static void iounmap_trace_core(volatile void __iomem *addr)
33579+static void iounmap_trace_core(const volatile void __iomem *addr)
33580 {
33581 struct mmiotrace_map map = {
33582 .phys = 0,
33583@@ -328,7 +328,7 @@ not_enabled:
33584 }
33585 }
33586
33587-void mmiotrace_iounmap(volatile void __iomem *addr)
33588+void mmiotrace_iounmap(const volatile void __iomem *addr)
33589 {
33590 might_sleep();
33591 if (is_enabled()) /* recheck and proper locking in *_core() */
33592diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
33593index cd4785b..25188b6 100644
33594--- a/arch/x86/mm/numa.c
33595+++ b/arch/x86/mm/numa.c
33596@@ -499,7 +499,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
33597 }
33598 }
33599
33600-static int __init numa_register_memblks(struct numa_meminfo *mi)
33601+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
33602 {
33603 unsigned long uninitialized_var(pfn_align);
33604 int i, nid;
33605diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
33606index 536ea2f..f42c293 100644
33607--- a/arch/x86/mm/pageattr.c
33608+++ b/arch/x86/mm/pageattr.c
33609@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33610 */
33611 #ifdef CONFIG_PCI_BIOS
33612 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
33613- pgprot_val(forbidden) |= _PAGE_NX;
33614+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33615 #endif
33616
33617 /*
33618@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33619 * Does not cover __inittext since that is gone later on. On
33620 * 64bit we do not enforce !NX on the low mapping
33621 */
33622- if (within(address, (unsigned long)_text, (unsigned long)_etext))
33623- pgprot_val(forbidden) |= _PAGE_NX;
33624+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
33625+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33626
33627+#ifdef CONFIG_DEBUG_RODATA
33628 /*
33629 * The .rodata section needs to be read-only. Using the pfn
33630 * catches all aliases.
33631@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33632 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
33633 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
33634 pgprot_val(forbidden) |= _PAGE_RW;
33635+#endif
33636
33637 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
33638 /*
33639@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33640 }
33641 #endif
33642
33643+#ifdef CONFIG_PAX_KERNEXEC
33644+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
33645+ pgprot_val(forbidden) |= _PAGE_RW;
33646+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33647+ }
33648+#endif
33649+
33650 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
33651
33652 return prot;
33653@@ -440,23 +449,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
33654 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
33655 {
33656 /* change init_mm */
33657+ pax_open_kernel();
33658 set_pte_atomic(kpte, pte);
33659+
33660 #ifdef CONFIG_X86_32
33661 if (!SHARED_KERNEL_PMD) {
33662+
33663+#ifdef CONFIG_PAX_PER_CPU_PGD
33664+ unsigned long cpu;
33665+#else
33666 struct page *page;
33667+#endif
33668
33669+#ifdef CONFIG_PAX_PER_CPU_PGD
33670+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33671+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
33672+#else
33673 list_for_each_entry(page, &pgd_list, lru) {
33674- pgd_t *pgd;
33675+ pgd_t *pgd = (pgd_t *)page_address(page);
33676+#endif
33677+
33678 pud_t *pud;
33679 pmd_t *pmd;
33680
33681- pgd = (pgd_t *)page_address(page) + pgd_index(address);
33682+ pgd += pgd_index(address);
33683 pud = pud_offset(pgd, address);
33684 pmd = pmd_offset(pud, address);
33685 set_pte_atomic((pte_t *)pmd, pte);
33686 }
33687 }
33688 #endif
33689+ pax_close_kernel();
33690 }
33691
33692 static int
33693diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33694index 7ac6869..c0ba541 100644
33695--- a/arch/x86/mm/pat.c
33696+++ b/arch/x86/mm/pat.c
33697@@ -89,7 +89,7 @@ static inline enum page_cache_mode get_page_memtype(struct page *pg)
33698 unsigned long pg_flags = pg->flags & _PGMT_MASK;
33699
33700 if (pg_flags == _PGMT_DEFAULT)
33701- return -1;
33702+ return _PAGE_CACHE_MODE_NUM;
33703 else if (pg_flags == _PGMT_WC)
33704 return _PAGE_CACHE_MODE_WC;
33705 else if (pg_flags == _PGMT_UC_MINUS)
33706@@ -346,7 +346,7 @@ static int reserve_ram_pages_type(u64 start, u64 end,
33707
33708 page = pfn_to_page(pfn);
33709 type = get_page_memtype(page);
33710- if (type != -1) {
33711+ if (type != _PAGE_CACHE_MODE_NUM) {
33712 pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
33713 start, end - 1, type, req_type);
33714 if (new_type)
33715@@ -498,7 +498,7 @@ int free_memtype(u64 start, u64 end)
33716
33717 if (!entry) {
33718 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33719- current->comm, current->pid, start, end - 1);
33720+ current->comm, task_pid_nr(current), start, end - 1);
33721 return -EINVAL;
33722 }
33723
33724@@ -532,10 +532,10 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
33725 page = pfn_to_page(paddr >> PAGE_SHIFT);
33726 rettype = get_page_memtype(page);
33727 /*
33728- * -1 from get_page_memtype() implies RAM page is in its
33729+ * _PAGE_CACHE_MODE_NUM from get_page_memtype() implies RAM page is in its
33730 * default state and not reserved, and hence of type WB
33731 */
33732- if (rettype == -1)
33733+ if (rettype == _PAGE_CACHE_MODE_NUM)
33734 rettype = _PAGE_CACHE_MODE_WB;
33735
33736 return rettype;
33737@@ -628,8 +628,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33738
33739 while (cursor < to) {
33740 if (!devmem_is_allowed(pfn)) {
33741- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
33742- current->comm, from, to - 1);
33743+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
33744+ current->comm, from, to - 1, cursor);
33745 return 0;
33746 }
33747 cursor += PAGE_SIZE;
33748@@ -700,7 +700,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
33749 if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
33750 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33751 "for [mem %#010Lx-%#010Lx]\n",
33752- current->comm, current->pid,
33753+ current->comm, task_pid_nr(current),
33754 cattr_name(pcm),
33755 base, (unsigned long long)(base + size-1));
33756 return -EINVAL;
33757@@ -735,7 +735,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33758 pcm = lookup_memtype(paddr);
33759 if (want_pcm != pcm) {
33760 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33761- current->comm, current->pid,
33762+ current->comm, task_pid_nr(current),
33763 cattr_name(want_pcm),
33764 (unsigned long long)paddr,
33765 (unsigned long long)(paddr + size - 1),
33766@@ -757,7 +757,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33767 free_memtype(paddr, paddr + size);
33768 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33769 " for [mem %#010Lx-%#010Lx], got %s\n",
33770- current->comm, current->pid,
33771+ current->comm, task_pid_nr(current),
33772 cattr_name(want_pcm),
33773 (unsigned long long)paddr,
33774 (unsigned long long)(paddr + size - 1),
33775diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33776index 6582adc..fcc5d0b 100644
33777--- a/arch/x86/mm/pat_rbtree.c
33778+++ b/arch/x86/mm/pat_rbtree.c
33779@@ -161,7 +161,7 @@ success:
33780
33781 failure:
33782 printk(KERN_INFO "%s:%d conflicting memory types "
33783- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33784+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33785 end, cattr_name(found_type), cattr_name(match->type));
33786 return -EBUSY;
33787 }
33788diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33789index 9f0614d..92ae64a 100644
33790--- a/arch/x86/mm/pf_in.c
33791+++ b/arch/x86/mm/pf_in.c
33792@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33793 int i;
33794 enum reason_type rv = OTHERS;
33795
33796- p = (unsigned char *)ins_addr;
33797+ p = (unsigned char *)ktla_ktva(ins_addr);
33798 p += skip_prefix(p, &prf);
33799 p += get_opcode(p, &opcode);
33800
33801@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33802 struct prefix_bits prf;
33803 int i;
33804
33805- p = (unsigned char *)ins_addr;
33806+ p = (unsigned char *)ktla_ktva(ins_addr);
33807 p += skip_prefix(p, &prf);
33808 p += get_opcode(p, &opcode);
33809
33810@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33811 struct prefix_bits prf;
33812 int i;
33813
33814- p = (unsigned char *)ins_addr;
33815+ p = (unsigned char *)ktla_ktva(ins_addr);
33816 p += skip_prefix(p, &prf);
33817 p += get_opcode(p, &opcode);
33818
33819@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33820 struct prefix_bits prf;
33821 int i;
33822
33823- p = (unsigned char *)ins_addr;
33824+ p = (unsigned char *)ktla_ktva(ins_addr);
33825 p += skip_prefix(p, &prf);
33826 p += get_opcode(p, &opcode);
33827 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33828@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33829 struct prefix_bits prf;
33830 int i;
33831
33832- p = (unsigned char *)ins_addr;
33833+ p = (unsigned char *)ktla_ktva(ins_addr);
33834 p += skip_prefix(p, &prf);
33835 p += get_opcode(p, &opcode);
33836 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
33837diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
33838index 7b22ada..b11e66f 100644
33839--- a/arch/x86/mm/pgtable.c
33840+++ b/arch/x86/mm/pgtable.c
33841@@ -97,10 +97,75 @@ static inline void pgd_list_del(pgd_t *pgd)
33842 list_del(&page->lru);
33843 }
33844
33845-#define UNSHARED_PTRS_PER_PGD \
33846- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33847+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33848+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
33849
33850+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
33851+{
33852+ unsigned int count = USER_PGD_PTRS;
33853
33854+ if (!pax_user_shadow_base)
33855+ return;
33856+
33857+ while (count--)
33858+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
33859+}
33860+#endif
33861+
33862+#ifdef CONFIG_PAX_PER_CPU_PGD
33863+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
33864+{
33865+ unsigned int count = USER_PGD_PTRS;
33866+
33867+ while (count--) {
33868+ pgd_t pgd;
33869+
33870+#ifdef CONFIG_X86_64
33871+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
33872+#else
33873+ pgd = *src++;
33874+#endif
33875+
33876+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33877+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
33878+#endif
33879+
33880+ *dst++ = pgd;
33881+ }
33882+
33883+}
33884+#endif
33885+
33886+#ifdef CONFIG_X86_64
33887+#define pxd_t pud_t
33888+#define pyd_t pgd_t
33889+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
33890+#define pgtable_pxd_page_ctor(page) true
33891+#define pgtable_pxd_page_dtor(page) do {} while (0)
33892+#define pxd_free(mm, pud) pud_free((mm), (pud))
33893+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
33894+#define pyd_offset(mm, address) pgd_offset((mm), (address))
33895+#define PYD_SIZE PGDIR_SIZE
33896+#define mm_inc_nr_pxds(mm) do {} while (0)
33897+#define mm_dec_nr_pxds(mm) do {} while (0)
33898+#else
33899+#define pxd_t pmd_t
33900+#define pyd_t pud_t
33901+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
33902+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
33903+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
33904+#define pxd_free(mm, pud) pmd_free((mm), (pud))
33905+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
33906+#define pyd_offset(mm, address) pud_offset((mm), (address))
33907+#define PYD_SIZE PUD_SIZE
33908+#define mm_inc_nr_pxds(mm) mm_inc_nr_pmds(mm)
33909+#define mm_dec_nr_pxds(mm) mm_dec_nr_pmds(mm)
33910+#endif
33911+
33912+#ifdef CONFIG_PAX_PER_CPU_PGD
33913+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
33914+static inline void pgd_dtor(pgd_t *pgd) {}
33915+#else
33916 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
33917 {
33918 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
33919@@ -141,6 +206,7 @@ static void pgd_dtor(pgd_t *pgd)
33920 pgd_list_del(pgd);
33921 spin_unlock(&pgd_lock);
33922 }
33923+#endif
33924
33925 /*
33926 * List of all pgd's needed for non-PAE so it can invalidate entries
33927@@ -153,7 +219,7 @@ static void pgd_dtor(pgd_t *pgd)
33928 * -- nyc
33929 */
33930
33931-#ifdef CONFIG_X86_PAE
33932+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
33933 /*
33934 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
33935 * updating the top-level pagetable entries to guarantee the
33936@@ -165,7 +231,7 @@ static void pgd_dtor(pgd_t *pgd)
33937 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
33938 * and initialize the kernel pmds here.
33939 */
33940-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
33941+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33942
33943 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33944 {
33945@@ -183,46 +249,48 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33946 */
33947 flush_tlb_mm(mm);
33948 }
33949+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
33950+#define PREALLOCATED_PXDS USER_PGD_PTRS
33951 #else /* !CONFIG_X86_PAE */
33952
33953 /* No need to prepopulate any pagetable entries in non-PAE modes. */
33954-#define PREALLOCATED_PMDS 0
33955+#define PREALLOCATED_PXDS 0
33956
33957 #endif /* CONFIG_X86_PAE */
33958
33959-static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
33960+static void free_pxds(struct mm_struct *mm, pxd_t *pxds[])
33961 {
33962 int i;
33963
33964- for(i = 0; i < PREALLOCATED_PMDS; i++)
33965- if (pmds[i]) {
33966- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
33967- free_page((unsigned long)pmds[i]);
33968- mm_dec_nr_pmds(mm);
33969+ for(i = 0; i < PREALLOCATED_PXDS; i++)
33970+ if (pxds[i]) {
33971+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
33972+ free_page((unsigned long)pxds[i]);
33973+ mm_dec_nr_pxds(mm);
33974 }
33975 }
33976
33977-static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
33978+static int preallocate_pxds(struct mm_struct *mm, pxd_t *pxds[])
33979 {
33980 int i;
33981 bool failed = false;
33982
33983- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33984- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
33985- if (!pmd)
33986+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33987+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
33988+ if (!pxd)
33989 failed = true;
33990- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
33991- free_page((unsigned long)pmd);
33992- pmd = NULL;
33993+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
33994+ free_page((unsigned long)pxd);
33995+ pxd = NULL;
33996 failed = true;
33997 }
33998- if (pmd)
33999- mm_inc_nr_pmds(mm);
34000- pmds[i] = pmd;
34001+ if (pxd)
34002+ mm_inc_nr_pxds(mm);
34003+ pxds[i] = pxd;
34004 }
34005
34006 if (failed) {
34007- free_pmds(mm, pmds);
34008+ free_pxds(mm, pxds);
34009 return -ENOMEM;
34010 }
34011
34012@@ -235,50 +303,54 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
34013 * preallocate which never got a corresponding vma will need to be
34014 * freed manually.
34015 */
34016-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
34017+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
34018 {
34019 int i;
34020
34021- for(i = 0; i < PREALLOCATED_PMDS; i++) {
34022+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
34023 pgd_t pgd = pgdp[i];
34024
34025 if (pgd_val(pgd) != 0) {
34026- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
34027+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
34028
34029- pgdp[i] = native_make_pgd(0);
34030+ set_pgd(pgdp + i, native_make_pgd(0));
34031
34032- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
34033- pmd_free(mm, pmd);
34034- mm_dec_nr_pmds(mm);
34035+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
34036+ pxd_free(mm, pxd);
34037+ mm_dec_nr_pxds(mm);
34038 }
34039 }
34040 }
34041
34042-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
34043+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
34044 {
34045- pud_t *pud;
34046+ pyd_t *pyd;
34047 int i;
34048
34049- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
34050+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
34051 return;
34052
34053- pud = pud_offset(pgd, 0);
34054+#ifdef CONFIG_X86_64
34055+ pyd = pyd_offset(mm, 0L);
34056+#else
34057+ pyd = pyd_offset(pgd, 0L);
34058+#endif
34059
34060- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
34061- pmd_t *pmd = pmds[i];
34062+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
34063+ pxd_t *pxd = pxds[i];
34064
34065 if (i >= KERNEL_PGD_BOUNDARY)
34066- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
34067- sizeof(pmd_t) * PTRS_PER_PMD);
34068+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
34069+ sizeof(pxd_t) * PTRS_PER_PMD);
34070
34071- pud_populate(mm, pud, pmd);
34072+ pyd_populate(mm, pyd, pxd);
34073 }
34074 }
34075
34076 pgd_t *pgd_alloc(struct mm_struct *mm)
34077 {
34078 pgd_t *pgd;
34079- pmd_t *pmds[PREALLOCATED_PMDS];
34080+ pxd_t *pxds[PREALLOCATED_PXDS];
34081
34082 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
34083
34084@@ -287,11 +359,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34085
34086 mm->pgd = pgd;
34087
34088- if (preallocate_pmds(mm, pmds) != 0)
34089+ if (preallocate_pxds(mm, pxds) != 0)
34090 goto out_free_pgd;
34091
34092 if (paravirt_pgd_alloc(mm) != 0)
34093- goto out_free_pmds;
34094+ goto out_free_pxds;
34095
34096 /*
34097 * Make sure that pre-populating the pmds is atomic with
34098@@ -301,14 +373,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34099 spin_lock(&pgd_lock);
34100
34101 pgd_ctor(mm, pgd);
34102- pgd_prepopulate_pmd(mm, pgd, pmds);
34103+ pgd_prepopulate_pxd(mm, pgd, pxds);
34104
34105 spin_unlock(&pgd_lock);
34106
34107 return pgd;
34108
34109-out_free_pmds:
34110- free_pmds(mm, pmds);
34111+out_free_pxds:
34112+ free_pxds(mm, pxds);
34113 out_free_pgd:
34114 free_page((unsigned long)pgd);
34115 out:
34116@@ -317,7 +389,7 @@ out:
34117
34118 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
34119 {
34120- pgd_mop_up_pmds(mm, pgd);
34121+ pgd_mop_up_pxds(mm, pgd);
34122 pgd_dtor(pgd);
34123 paravirt_pgd_free(mm, pgd);
34124 free_page((unsigned long)pgd);
34125diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
34126index 75cc097..79a097f 100644
34127--- a/arch/x86/mm/pgtable_32.c
34128+++ b/arch/x86/mm/pgtable_32.c
34129@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
34130 return;
34131 }
34132 pte = pte_offset_kernel(pmd, vaddr);
34133+
34134+ pax_open_kernel();
34135 if (pte_val(pteval))
34136 set_pte_at(&init_mm, vaddr, pte, pteval);
34137 else
34138 pte_clear(&init_mm, vaddr, pte);
34139+ pax_close_kernel();
34140
34141 /*
34142 * It's enough to flush this one mapping.
34143diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
34144index e666cbb..61788c45 100644
34145--- a/arch/x86/mm/physaddr.c
34146+++ b/arch/x86/mm/physaddr.c
34147@@ -10,7 +10,7 @@
34148 #ifdef CONFIG_X86_64
34149
34150 #ifdef CONFIG_DEBUG_VIRTUAL
34151-unsigned long __phys_addr(unsigned long x)
34152+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34153 {
34154 unsigned long y = x - __START_KERNEL_map;
34155
34156@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
34157 #else
34158
34159 #ifdef CONFIG_DEBUG_VIRTUAL
34160-unsigned long __phys_addr(unsigned long x)
34161+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34162 {
34163 unsigned long phys_addr = x - PAGE_OFFSET;
34164 /* VMALLOC_* aren't constants */
34165diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
34166index 90555bf..f5f1828 100644
34167--- a/arch/x86/mm/setup_nx.c
34168+++ b/arch/x86/mm/setup_nx.c
34169@@ -5,8 +5,10 @@
34170 #include <asm/pgtable.h>
34171 #include <asm/proto.h>
34172
34173+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34174 static int disable_nx;
34175
34176+#ifndef CONFIG_PAX_PAGEEXEC
34177 /*
34178 * noexec = on|off
34179 *
34180@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
34181 return 0;
34182 }
34183 early_param("noexec", noexec_setup);
34184+#endif
34185+
34186+#endif
34187
34188 void x86_configure_nx(void)
34189 {
34190+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34191 if (cpu_has_nx && !disable_nx)
34192 __supported_pte_mask |= _PAGE_NX;
34193 else
34194+#endif
34195 __supported_pte_mask &= ~_PAGE_NX;
34196 }
34197
34198diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
34199index 3250f23..7a97ba2 100644
34200--- a/arch/x86/mm/tlb.c
34201+++ b/arch/x86/mm/tlb.c
34202@@ -45,7 +45,11 @@ void leave_mm(int cpu)
34203 BUG();
34204 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
34205 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
34206+
34207+#ifndef CONFIG_PAX_PER_CPU_PGD
34208 load_cr3(swapper_pg_dir);
34209+#endif
34210+
34211 /*
34212 * This gets called in the idle path where RCU
34213 * functions differently. Tracing normally
34214diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
34215new file mode 100644
34216index 0000000..3fda3f3
34217--- /dev/null
34218+++ b/arch/x86/mm/uderef_64.c
34219@@ -0,0 +1,37 @@
34220+#include <linux/mm.h>
34221+#include <asm/pgtable.h>
34222+#include <asm/uaccess.h>
34223+
34224+#ifdef CONFIG_PAX_MEMORY_UDEREF
34225+/* PaX: due to the special call convention these functions must
34226+ * - remain leaf functions under all configurations,
34227+ * - never be called directly, only dereferenced from the wrappers.
34228+ */
34229+void __used __pax_open_userland(void)
34230+{
34231+ unsigned int cpu;
34232+
34233+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34234+ return;
34235+
34236+ cpu = raw_get_cpu();
34237+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34238+ write_cr3(__pa_nodebug(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34239+ raw_put_cpu_no_resched();
34240+}
34241+EXPORT_SYMBOL(__pax_open_userland);
34242+
34243+void __used __pax_close_userland(void)
34244+{
34245+ unsigned int cpu;
34246+
34247+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34248+ return;
34249+
34250+ cpu = raw_get_cpu();
34251+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34252+ write_cr3(__pa_nodebug(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34253+ raw_put_cpu_no_resched();
34254+}
34255+EXPORT_SYMBOL(__pax_close_userland);
34256+#endif
34257diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34258index 6440221..f84b5c7 100644
34259--- a/arch/x86/net/bpf_jit.S
34260+++ b/arch/x86/net/bpf_jit.S
34261@@ -9,6 +9,7 @@
34262 */
34263 #include <linux/linkage.h>
34264 #include <asm/dwarf2.h>
34265+#include <asm/alternative-asm.h>
34266
34267 /*
34268 * Calling convention :
34269@@ -38,6 +39,7 @@ sk_load_word_positive_offset:
34270 jle bpf_slow_path_word
34271 mov (SKBDATA,%rsi),%eax
34272 bswap %eax /* ntohl() */
34273+ pax_force_retaddr
34274 ret
34275
34276 sk_load_half:
34277@@ -55,6 +57,7 @@ sk_load_half_positive_offset:
34278 jle bpf_slow_path_half
34279 movzwl (SKBDATA,%rsi),%eax
34280 rol $8,%ax # ntohs()
34281+ pax_force_retaddr
34282 ret
34283
34284 sk_load_byte:
34285@@ -69,6 +72,7 @@ sk_load_byte_positive_offset:
34286 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34287 jle bpf_slow_path_byte
34288 movzbl (SKBDATA,%rsi),%eax
34289+ pax_force_retaddr
34290 ret
34291
34292 /* rsi contains offset and can be scratched */
34293@@ -90,6 +94,7 @@ bpf_slow_path_word:
34294 js bpf_error
34295 mov - MAX_BPF_STACK + 32(%rbp),%eax
34296 bswap %eax
34297+ pax_force_retaddr
34298 ret
34299
34300 bpf_slow_path_half:
34301@@ -98,12 +103,14 @@ bpf_slow_path_half:
34302 mov - MAX_BPF_STACK + 32(%rbp),%ax
34303 rol $8,%ax
34304 movzwl %ax,%eax
34305+ pax_force_retaddr
34306 ret
34307
34308 bpf_slow_path_byte:
34309 bpf_slow_path_common(1)
34310 js bpf_error
34311 movzbl - MAX_BPF_STACK + 32(%rbp),%eax
34312+ pax_force_retaddr
34313 ret
34314
34315 #define sk_negative_common(SIZE) \
34316@@ -126,6 +133,7 @@ sk_load_word_negative_offset:
34317 sk_negative_common(4)
34318 mov (%rax), %eax
34319 bswap %eax
34320+ pax_force_retaddr
34321 ret
34322
34323 bpf_slow_path_half_neg:
34324@@ -137,6 +145,7 @@ sk_load_half_negative_offset:
34325 mov (%rax),%ax
34326 rol $8,%ax
34327 movzwl %ax,%eax
34328+ pax_force_retaddr
34329 ret
34330
34331 bpf_slow_path_byte_neg:
34332@@ -146,6 +155,7 @@ sk_load_byte_negative_offset:
34333 .globl sk_load_byte_negative_offset
34334 sk_negative_common(1)
34335 movzbl (%rax), %eax
34336+ pax_force_retaddr
34337 ret
34338
34339 bpf_error:
34340@@ -156,4 +166,5 @@ bpf_error:
34341 mov - MAX_BPF_STACK + 16(%rbp),%r14
34342 mov - MAX_BPF_STACK + 24(%rbp),%r15
34343 leaveq
34344+ pax_force_retaddr
34345 ret
34346diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34347index ddeff48..877ead6 100644
34348--- a/arch/x86/net/bpf_jit_comp.c
34349+++ b/arch/x86/net/bpf_jit_comp.c
34350@@ -13,7 +13,11 @@
34351 #include <linux/if_vlan.h>
34352 #include <asm/cacheflush.h>
34353
34354+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
34355+int bpf_jit_enable __read_only;
34356+#else
34357 int bpf_jit_enable __read_mostly;
34358+#endif
34359
34360 /*
34361 * assembly code in arch/x86/net/bpf_jit.S
34362@@ -174,7 +178,9 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
34363 static void jit_fill_hole(void *area, unsigned int size)
34364 {
34365 /* fill whole space with int3 instructions */
34366+ pax_open_kernel();
34367 memset(area, 0xcc, size);
34368+ pax_close_kernel();
34369 }
34370
34371 struct jit_context {
34372@@ -924,7 +930,9 @@ common_load:
34373 pr_err("bpf_jit_compile fatal error\n");
34374 return -EFAULT;
34375 }
34376+ pax_open_kernel();
34377 memcpy(image + proglen, temp, ilen);
34378+ pax_close_kernel();
34379 }
34380 proglen += ilen;
34381 addrs[i] = proglen;
34382@@ -1001,7 +1009,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
34383
34384 if (image) {
34385 bpf_flush_icache(header, image + proglen);
34386- set_memory_ro((unsigned long)header, header->pages);
34387 prog->bpf_func = (void *)image;
34388 prog->jited = true;
34389 }
34390@@ -1014,12 +1021,8 @@ void bpf_jit_free(struct bpf_prog *fp)
34391 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
34392 struct bpf_binary_header *header = (void *)addr;
34393
34394- if (!fp->jited)
34395- goto free_filter;
34396+ if (fp->jited)
34397+ bpf_jit_binary_free(header);
34398
34399- set_memory_rw(addr, header->pages);
34400- bpf_jit_binary_free(header);
34401-
34402-free_filter:
34403 bpf_prog_unlock_free(fp);
34404 }
34405diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
34406index 5d04be5..2beeaa2 100644
34407--- a/arch/x86/oprofile/backtrace.c
34408+++ b/arch/x86/oprofile/backtrace.c
34409@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
34410 struct stack_frame_ia32 *fp;
34411 unsigned long bytes;
34412
34413- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34414+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34415 if (bytes != 0)
34416 return NULL;
34417
34418- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
34419+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
34420
34421 oprofile_add_trace(bufhead[0].return_address);
34422
34423@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
34424 struct stack_frame bufhead[2];
34425 unsigned long bytes;
34426
34427- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34428+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34429 if (bytes != 0)
34430 return NULL;
34431
34432@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
34433 {
34434 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
34435
34436- if (!user_mode_vm(regs)) {
34437+ if (!user_mode(regs)) {
34438 unsigned long stack = kernel_stack_pointer(regs);
34439 if (depth)
34440 dump_trace(NULL, regs, (unsigned long *)stack, 0,
34441diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
34442index 1d2e639..f6ef82a 100644
34443--- a/arch/x86/oprofile/nmi_int.c
34444+++ b/arch/x86/oprofile/nmi_int.c
34445@@ -23,6 +23,7 @@
34446 #include <asm/nmi.h>
34447 #include <asm/msr.h>
34448 #include <asm/apic.h>
34449+#include <asm/pgtable.h>
34450
34451 #include "op_counter.h"
34452 #include "op_x86_model.h"
34453@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
34454 if (ret)
34455 return ret;
34456
34457- if (!model->num_virt_counters)
34458- model->num_virt_counters = model->num_counters;
34459+ if (!model->num_virt_counters) {
34460+ pax_open_kernel();
34461+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
34462+ pax_close_kernel();
34463+ }
34464
34465 mux_init(ops);
34466
34467diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
34468index 50d86c0..7985318 100644
34469--- a/arch/x86/oprofile/op_model_amd.c
34470+++ b/arch/x86/oprofile/op_model_amd.c
34471@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
34472 num_counters = AMD64_NUM_COUNTERS;
34473 }
34474
34475- op_amd_spec.num_counters = num_counters;
34476- op_amd_spec.num_controls = num_counters;
34477- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34478+ pax_open_kernel();
34479+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
34480+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
34481+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34482+ pax_close_kernel();
34483
34484 return 0;
34485 }
34486diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
34487index d90528e..0127e2b 100644
34488--- a/arch/x86/oprofile/op_model_ppro.c
34489+++ b/arch/x86/oprofile/op_model_ppro.c
34490@@ -19,6 +19,7 @@
34491 #include <asm/msr.h>
34492 #include <asm/apic.h>
34493 #include <asm/nmi.h>
34494+#include <asm/pgtable.h>
34495
34496 #include "op_x86_model.h"
34497 #include "op_counter.h"
34498@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
34499
34500 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
34501
34502- op_arch_perfmon_spec.num_counters = num_counters;
34503- op_arch_perfmon_spec.num_controls = num_counters;
34504+ pax_open_kernel();
34505+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
34506+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
34507+ pax_close_kernel();
34508 }
34509
34510 static int arch_perfmon_init(struct oprofile_operations *ignore)
34511diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
34512index 71e8a67..6a313bb 100644
34513--- a/arch/x86/oprofile/op_x86_model.h
34514+++ b/arch/x86/oprofile/op_x86_model.h
34515@@ -52,7 +52,7 @@ struct op_x86_model_spec {
34516 void (*switch_ctrl)(struct op_x86_model_spec const *model,
34517 struct op_msrs const * const msrs);
34518 #endif
34519-};
34520+} __do_const;
34521
34522 struct op_counter_config;
34523
34524diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
34525index 852aa4c..71613f2 100644
34526--- a/arch/x86/pci/intel_mid_pci.c
34527+++ b/arch/x86/pci/intel_mid_pci.c
34528@@ -258,7 +258,7 @@ int __init intel_mid_pci_init(void)
34529 pci_mmcfg_late_init();
34530 pcibios_enable_irq = intel_mid_pci_irq_enable;
34531 pcibios_disable_irq = intel_mid_pci_irq_disable;
34532- pci_root_ops = intel_mid_pci_ops;
34533+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
34534 pci_soc_mode = 1;
34535 /* Continue with standard init */
34536 return 1;
34537diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
34538index 5dc6ca5..25c03f5 100644
34539--- a/arch/x86/pci/irq.c
34540+++ b/arch/x86/pci/irq.c
34541@@ -51,7 +51,7 @@ struct irq_router {
34542 struct irq_router_handler {
34543 u16 vendor;
34544 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
34545-};
34546+} __do_const;
34547
34548 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
34549 void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
34550@@ -791,7 +791,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
34551 return 0;
34552 }
34553
34554-static __initdata struct irq_router_handler pirq_routers[] = {
34555+static __initconst const struct irq_router_handler pirq_routers[] = {
34556 { PCI_VENDOR_ID_INTEL, intel_router_probe },
34557 { PCI_VENDOR_ID_AL, ali_router_probe },
34558 { PCI_VENDOR_ID_ITE, ite_router_probe },
34559@@ -818,7 +818,7 @@ static struct pci_dev *pirq_router_dev;
34560 static void __init pirq_find_router(struct irq_router *r)
34561 {
34562 struct irq_routing_table *rt = pirq_table;
34563- struct irq_router_handler *h;
34564+ const struct irq_router_handler *h;
34565
34566 #ifdef CONFIG_PCI_BIOS
34567 if (!rt->signature) {
34568@@ -1091,7 +1091,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
34569 return 0;
34570 }
34571
34572-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
34573+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
34574 {
34575 .callback = fix_broken_hp_bios_irq9,
34576 .ident = "HP Pavilion N5400 Series Laptop",
34577diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
34578index 9b83b90..4112152 100644
34579--- a/arch/x86/pci/pcbios.c
34580+++ b/arch/x86/pci/pcbios.c
34581@@ -79,7 +79,7 @@ union bios32 {
34582 static struct {
34583 unsigned long address;
34584 unsigned short segment;
34585-} bios32_indirect __initdata = { 0, __KERNEL_CS };
34586+} bios32_indirect __initconst = { 0, __PCIBIOS_CS };
34587
34588 /*
34589 * Returns the entry point for the given service, NULL on error
34590@@ -92,37 +92,80 @@ static unsigned long __init bios32_service(unsigned long service)
34591 unsigned long length; /* %ecx */
34592 unsigned long entry; /* %edx */
34593 unsigned long flags;
34594+ struct desc_struct d, *gdt;
34595
34596 local_irq_save(flags);
34597- __asm__("lcall *(%%edi); cld"
34598+
34599+ gdt = get_cpu_gdt_table(smp_processor_id());
34600+
34601+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
34602+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34603+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
34604+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34605+
34606+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
34607 : "=a" (return_code),
34608 "=b" (address),
34609 "=c" (length),
34610 "=d" (entry)
34611 : "0" (service),
34612 "1" (0),
34613- "D" (&bios32_indirect));
34614+ "D" (&bios32_indirect),
34615+ "r"(__PCIBIOS_DS)
34616+ : "memory");
34617+
34618+ pax_open_kernel();
34619+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
34620+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
34621+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
34622+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
34623+ pax_close_kernel();
34624+
34625 local_irq_restore(flags);
34626
34627 switch (return_code) {
34628- case 0:
34629- return address + entry;
34630- case 0x80: /* Not present */
34631- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34632- return 0;
34633- default: /* Shouldn't happen */
34634- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34635- service, return_code);
34636+ case 0: {
34637+ int cpu;
34638+ unsigned char flags;
34639+
34640+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
34641+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
34642+ printk(KERN_WARNING "bios32_service: not valid\n");
34643 return 0;
34644+ }
34645+ address = address + PAGE_OFFSET;
34646+ length += 16UL; /* some BIOSs underreport this... */
34647+ flags = 4;
34648+ if (length >= 64*1024*1024) {
34649+ length >>= PAGE_SHIFT;
34650+ flags |= 8;
34651+ }
34652+
34653+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
34654+ gdt = get_cpu_gdt_table(cpu);
34655+ pack_descriptor(&d, address, length, 0x9b, flags);
34656+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34657+ pack_descriptor(&d, address, length, 0x93, flags);
34658+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34659+ }
34660+ return entry;
34661+ }
34662+ case 0x80: /* Not present */
34663+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34664+ return 0;
34665+ default: /* Shouldn't happen */
34666+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34667+ service, return_code);
34668+ return 0;
34669 }
34670 }
34671
34672 static struct {
34673 unsigned long address;
34674 unsigned short segment;
34675-} pci_indirect = { 0, __KERNEL_CS };
34676+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
34677
34678-static int pci_bios_present;
34679+static int pci_bios_present __read_only;
34680
34681 static int __init check_pcibios(void)
34682 {
34683@@ -131,11 +174,13 @@ static int __init check_pcibios(void)
34684 unsigned long flags, pcibios_entry;
34685
34686 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
34687- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
34688+ pci_indirect.address = pcibios_entry;
34689
34690 local_irq_save(flags);
34691- __asm__(
34692- "lcall *(%%edi); cld\n\t"
34693+ __asm__("movw %w6, %%ds\n\t"
34694+ "lcall *%%ss:(%%edi); cld\n\t"
34695+ "push %%ss\n\t"
34696+ "pop %%ds\n\t"
34697 "jc 1f\n\t"
34698 "xor %%ah, %%ah\n"
34699 "1:"
34700@@ -144,7 +189,8 @@ static int __init check_pcibios(void)
34701 "=b" (ebx),
34702 "=c" (ecx)
34703 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
34704- "D" (&pci_indirect)
34705+ "D" (&pci_indirect),
34706+ "r" (__PCIBIOS_DS)
34707 : "memory");
34708 local_irq_restore(flags);
34709
34710@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34711
34712 switch (len) {
34713 case 1:
34714- __asm__("lcall *(%%esi); cld\n\t"
34715+ __asm__("movw %w6, %%ds\n\t"
34716+ "lcall *%%ss:(%%esi); cld\n\t"
34717+ "push %%ss\n\t"
34718+ "pop %%ds\n\t"
34719 "jc 1f\n\t"
34720 "xor %%ah, %%ah\n"
34721 "1:"
34722@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34723 : "1" (PCIBIOS_READ_CONFIG_BYTE),
34724 "b" (bx),
34725 "D" ((long)reg),
34726- "S" (&pci_indirect));
34727+ "S" (&pci_indirect),
34728+ "r" (__PCIBIOS_DS));
34729 /*
34730 * Zero-extend the result beyond 8 bits, do not trust the
34731 * BIOS having done it:
34732@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34733 *value &= 0xff;
34734 break;
34735 case 2:
34736- __asm__("lcall *(%%esi); cld\n\t"
34737+ __asm__("movw %w6, %%ds\n\t"
34738+ "lcall *%%ss:(%%esi); cld\n\t"
34739+ "push %%ss\n\t"
34740+ "pop %%ds\n\t"
34741 "jc 1f\n\t"
34742 "xor %%ah, %%ah\n"
34743 "1:"
34744@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34745 : "1" (PCIBIOS_READ_CONFIG_WORD),
34746 "b" (bx),
34747 "D" ((long)reg),
34748- "S" (&pci_indirect));
34749+ "S" (&pci_indirect),
34750+ "r" (__PCIBIOS_DS));
34751 /*
34752 * Zero-extend the result beyond 16 bits, do not trust the
34753 * BIOS having done it:
34754@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34755 *value &= 0xffff;
34756 break;
34757 case 4:
34758- __asm__("lcall *(%%esi); cld\n\t"
34759+ __asm__("movw %w6, %%ds\n\t"
34760+ "lcall *%%ss:(%%esi); cld\n\t"
34761+ "push %%ss\n\t"
34762+ "pop %%ds\n\t"
34763 "jc 1f\n\t"
34764 "xor %%ah, %%ah\n"
34765 "1:"
34766@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34767 : "1" (PCIBIOS_READ_CONFIG_DWORD),
34768 "b" (bx),
34769 "D" ((long)reg),
34770- "S" (&pci_indirect));
34771+ "S" (&pci_indirect),
34772+ "r" (__PCIBIOS_DS));
34773 break;
34774 }
34775
34776@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34777
34778 switch (len) {
34779 case 1:
34780- __asm__("lcall *(%%esi); cld\n\t"
34781+ __asm__("movw %w6, %%ds\n\t"
34782+ "lcall *%%ss:(%%esi); cld\n\t"
34783+ "push %%ss\n\t"
34784+ "pop %%ds\n\t"
34785 "jc 1f\n\t"
34786 "xor %%ah, %%ah\n"
34787 "1:"
34788@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34789 "c" (value),
34790 "b" (bx),
34791 "D" ((long)reg),
34792- "S" (&pci_indirect));
34793+ "S" (&pci_indirect),
34794+ "r" (__PCIBIOS_DS));
34795 break;
34796 case 2:
34797- __asm__("lcall *(%%esi); cld\n\t"
34798+ __asm__("movw %w6, %%ds\n\t"
34799+ "lcall *%%ss:(%%esi); cld\n\t"
34800+ "push %%ss\n\t"
34801+ "pop %%ds\n\t"
34802 "jc 1f\n\t"
34803 "xor %%ah, %%ah\n"
34804 "1:"
34805@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34806 "c" (value),
34807 "b" (bx),
34808 "D" ((long)reg),
34809- "S" (&pci_indirect));
34810+ "S" (&pci_indirect),
34811+ "r" (__PCIBIOS_DS));
34812 break;
34813 case 4:
34814- __asm__("lcall *(%%esi); cld\n\t"
34815+ __asm__("movw %w6, %%ds\n\t"
34816+ "lcall *%%ss:(%%esi); cld\n\t"
34817+ "push %%ss\n\t"
34818+ "pop %%ds\n\t"
34819 "jc 1f\n\t"
34820 "xor %%ah, %%ah\n"
34821 "1:"
34822@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34823 "c" (value),
34824 "b" (bx),
34825 "D" ((long)reg),
34826- "S" (&pci_indirect));
34827+ "S" (&pci_indirect),
34828+ "r" (__PCIBIOS_DS));
34829 break;
34830 }
34831
34832@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34833
34834 DBG("PCI: Fetching IRQ routing table... ");
34835 __asm__("push %%es\n\t"
34836+ "movw %w8, %%ds\n\t"
34837 "push %%ds\n\t"
34838 "pop %%es\n\t"
34839- "lcall *(%%esi); cld\n\t"
34840+ "lcall *%%ss:(%%esi); cld\n\t"
34841 "pop %%es\n\t"
34842+ "push %%ss\n\t"
34843+ "pop %%ds\n"
34844 "jc 1f\n\t"
34845 "xor %%ah, %%ah\n"
34846 "1:"
34847@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34848 "1" (0),
34849 "D" ((long) &opt),
34850 "S" (&pci_indirect),
34851- "m" (opt)
34852+ "m" (opt),
34853+ "r" (__PCIBIOS_DS)
34854 : "memory");
34855 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
34856 if (ret & 0xff00)
34857@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34858 {
34859 int ret;
34860
34861- __asm__("lcall *(%%esi); cld\n\t"
34862+ __asm__("movw %w5, %%ds\n\t"
34863+ "lcall *%%ss:(%%esi); cld\n\t"
34864+ "push %%ss\n\t"
34865+ "pop %%ds\n"
34866 "jc 1f\n\t"
34867 "xor %%ah, %%ah\n"
34868 "1:"
34869@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34870 : "0" (PCIBIOS_SET_PCI_HW_INT),
34871 "b" ((dev->bus->number << 8) | dev->devfn),
34872 "c" ((irq << 8) | (pin + 10)),
34873- "S" (&pci_indirect));
34874+ "S" (&pci_indirect),
34875+ "r" (__PCIBIOS_DS));
34876 return !(ret & 0xff00);
34877 }
34878 EXPORT_SYMBOL(pcibios_set_irq_routing);
34879diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
34880index 40e7cda..c7e6672 100644
34881--- a/arch/x86/platform/efi/efi_32.c
34882+++ b/arch/x86/platform/efi/efi_32.c
34883@@ -61,11 +61,22 @@ void __init efi_call_phys_prolog(void)
34884 {
34885 struct desc_ptr gdt_descr;
34886
34887+#ifdef CONFIG_PAX_KERNEXEC
34888+ struct desc_struct d;
34889+#endif
34890+
34891 local_irq_save(efi_rt_eflags);
34892
34893 load_cr3(initial_page_table);
34894 __flush_tlb_all();
34895
34896+#ifdef CONFIG_PAX_KERNEXEC
34897+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
34898+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34899+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
34900+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34901+#endif
34902+
34903 gdt_descr.address = __pa(get_cpu_gdt_table(0));
34904 gdt_descr.size = GDT_SIZE - 1;
34905 load_gdt(&gdt_descr);
34906@@ -75,11 +86,24 @@ void __init efi_call_phys_epilog(void)
34907 {
34908 struct desc_ptr gdt_descr;
34909
34910+#ifdef CONFIG_PAX_KERNEXEC
34911+ struct desc_struct d;
34912+
34913+ memset(&d, 0, sizeof d);
34914+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34915+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34916+#endif
34917+
34918 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
34919 gdt_descr.size = GDT_SIZE - 1;
34920 load_gdt(&gdt_descr);
34921
34922+#ifdef CONFIG_PAX_PER_CPU_PGD
34923+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34924+#else
34925 load_cr3(swapper_pg_dir);
34926+#endif
34927+
34928 __flush_tlb_all();
34929
34930 local_irq_restore(efi_rt_eflags);
34931diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
34932index 17e80d8..9fa6e41 100644
34933--- a/arch/x86/platform/efi/efi_64.c
34934+++ b/arch/x86/platform/efi/efi_64.c
34935@@ -98,6 +98,11 @@ void __init efi_call_phys_prolog(void)
34936 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
34937 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
34938 }
34939+
34940+#ifdef CONFIG_PAX_PER_CPU_PGD
34941+ load_cr3(swapper_pg_dir);
34942+#endif
34943+
34944 __flush_tlb_all();
34945 }
34946
34947@@ -115,6 +120,11 @@ void __init efi_call_phys_epilog(void)
34948 for (pgd = 0; pgd < n_pgds; pgd++)
34949 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
34950 kfree(save_pgd);
34951+
34952+#ifdef CONFIG_PAX_PER_CPU_PGD
34953+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34954+#endif
34955+
34956 __flush_tlb_all();
34957 local_irq_restore(efi_flags);
34958 early_code_mapping_set_exec(0);
34959@@ -145,8 +155,23 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
34960 unsigned npages;
34961 pgd_t *pgd;
34962
34963- if (efi_enabled(EFI_OLD_MEMMAP))
34964+ if (efi_enabled(EFI_OLD_MEMMAP)) {
34965+ /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
34966+ * able to execute the EFI services.
34967+ */
34968+ if (__supported_pte_mask & _PAGE_NX) {
34969+ unsigned long addr = (unsigned long) __va(0);
34970+ pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);
34971+
34972+ pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
34973+#ifdef CONFIG_PAX_PER_CPU_PGD
34974+ set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
34975+#endif
34976+ set_pgd(pgd_offset_k(addr), pe);
34977+ }
34978+
34979 return 0;
34980+ }
34981
34982 efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
34983 pgd = __va(efi_scratch.efi_pgt);
34984diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
34985index 040192b..7d3300f 100644
34986--- a/arch/x86/platform/efi/efi_stub_32.S
34987+++ b/arch/x86/platform/efi/efi_stub_32.S
34988@@ -6,7 +6,9 @@
34989 */
34990
34991 #include <linux/linkage.h>
34992+#include <linux/init.h>
34993 #include <asm/page_types.h>
34994+#include <asm/segment.h>
34995
34996 /*
34997 * efi_call_phys(void *, ...) is a function with variable parameters.
34998@@ -20,7 +22,7 @@
34999 * service functions will comply with gcc calling convention, too.
35000 */
35001
35002-.text
35003+__INIT
35004 ENTRY(efi_call_phys)
35005 /*
35006 * 0. The function can only be called in Linux kernel. So CS has been
35007@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
35008 * The mapping of lower virtual memory has been created in prolog and
35009 * epilog.
35010 */
35011- movl $1f, %edx
35012- subl $__PAGE_OFFSET, %edx
35013- jmp *%edx
35014+#ifdef CONFIG_PAX_KERNEXEC
35015+ movl $(__KERNEXEC_EFI_DS), %edx
35016+ mov %edx, %ds
35017+ mov %edx, %es
35018+ mov %edx, %ss
35019+ addl $2f,(1f)
35020+ ljmp *(1f)
35021+
35022+__INITDATA
35023+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
35024+.previous
35025+
35026+2:
35027+ subl $2b,(1b)
35028+#else
35029+ jmp 1f-__PAGE_OFFSET
35030 1:
35031+#endif
35032
35033 /*
35034 * 2. Now on the top of stack is the return
35035@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
35036 * parameter 2, ..., param n. To make things easy, we save the return
35037 * address of efi_call_phys in a global variable.
35038 */
35039- popl %edx
35040- movl %edx, saved_return_addr
35041- /* get the function pointer into ECX*/
35042- popl %ecx
35043- movl %ecx, efi_rt_function_ptr
35044- movl $2f, %edx
35045- subl $__PAGE_OFFSET, %edx
35046- pushl %edx
35047+ popl (saved_return_addr)
35048+ popl (efi_rt_function_ptr)
35049
35050 /*
35051 * 3. Clear PG bit in %CR0.
35052@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
35053 /*
35054 * 5. Call the physical function.
35055 */
35056- jmp *%ecx
35057+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
35058
35059-2:
35060 /*
35061 * 6. After EFI runtime service returns, control will return to
35062 * following instruction. We'd better readjust stack pointer first.
35063@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
35064 movl %cr0, %edx
35065 orl $0x80000000, %edx
35066 movl %edx, %cr0
35067- jmp 1f
35068-1:
35069+
35070 /*
35071 * 8. Now restore the virtual mode from flat mode by
35072 * adding EIP with PAGE_OFFSET.
35073 */
35074- movl $1f, %edx
35075- jmp *%edx
35076+#ifdef CONFIG_PAX_KERNEXEC
35077+ movl $(__KERNEL_DS), %edx
35078+ mov %edx, %ds
35079+ mov %edx, %es
35080+ mov %edx, %ss
35081+ ljmp $(__KERNEL_CS),$1f
35082+#else
35083+ jmp 1f+__PAGE_OFFSET
35084+#endif
35085 1:
35086
35087 /*
35088 * 9. Balance the stack. And because EAX contain the return value,
35089 * we'd better not clobber it.
35090 */
35091- leal efi_rt_function_ptr, %edx
35092- movl (%edx), %ecx
35093- pushl %ecx
35094+ pushl (efi_rt_function_ptr)
35095
35096 /*
35097- * 10. Push the saved return address onto the stack and return.
35098+ * 10. Return to the saved return address.
35099 */
35100- leal saved_return_addr, %edx
35101- movl (%edx), %ecx
35102- pushl %ecx
35103- ret
35104+ jmpl *(saved_return_addr)
35105 ENDPROC(efi_call_phys)
35106 .previous
35107
35108-.data
35109+__INITDATA
35110 saved_return_addr:
35111 .long 0
35112 efi_rt_function_ptr:
35113diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
35114index 86d0f9e..6d499f4 100644
35115--- a/arch/x86/platform/efi/efi_stub_64.S
35116+++ b/arch/x86/platform/efi/efi_stub_64.S
35117@@ -11,6 +11,7 @@
35118 #include <asm/msr.h>
35119 #include <asm/processor-flags.h>
35120 #include <asm/page_types.h>
35121+#include <asm/alternative-asm.h>
35122
35123 #define SAVE_XMM \
35124 mov %rsp, %rax; \
35125@@ -88,6 +89,7 @@ ENTRY(efi_call)
35126 RESTORE_PGT
35127 addq $48, %rsp
35128 RESTORE_XMM
35129+ pax_force_retaddr 0, 1
35130 ret
35131 ENDPROC(efi_call)
35132
35133diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
35134index 3005f0c..d06aeb0 100644
35135--- a/arch/x86/platform/intel-mid/intel-mid.c
35136+++ b/arch/x86/platform/intel-mid/intel-mid.c
35137@@ -63,7 +63,7 @@ enum intel_mid_timer_options intel_mid_timer_options;
35138 /* intel_mid_ops to store sub arch ops */
35139 struct intel_mid_ops *intel_mid_ops;
35140 /* getter function for sub arch ops*/
35141-static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
35142+static const void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
35143 enum intel_mid_cpu_type __intel_mid_cpu_chip;
35144 EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip);
35145
35146@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
35147 {
35148 };
35149
35150-static void intel_mid_reboot(void)
35151+static void __noreturn intel_mid_reboot(void)
35152 {
35153 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
35154+ BUG();
35155 }
35156
35157 static unsigned long __init intel_mid_calibrate_tsc(void)
35158diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35159index 3c1c386..59a68ed 100644
35160--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35161+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35162@@ -13,6 +13,6 @@
35163 /* For every CPU addition a new get_<cpuname>_ops interface needs
35164 * to be added.
35165 */
35166-extern void *get_penwell_ops(void);
35167-extern void *get_cloverview_ops(void);
35168-extern void *get_tangier_ops(void);
35169+extern const void *get_penwell_ops(void);
35170+extern const void *get_cloverview_ops(void);
35171+extern const void *get_tangier_ops(void);
35172diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
35173index 23381d2..8ddc10e 100644
35174--- a/arch/x86/platform/intel-mid/mfld.c
35175+++ b/arch/x86/platform/intel-mid/mfld.c
35176@@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void)
35177 pm_power_off = mfld_power_off;
35178 }
35179
35180-void *get_penwell_ops(void)
35181+const void *get_penwell_ops(void)
35182 {
35183 return &penwell_ops;
35184 }
35185
35186-void *get_cloverview_ops(void)
35187+const void *get_cloverview_ops(void)
35188 {
35189 return &penwell_ops;
35190 }
35191diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
35192index aaca917..66eadbc 100644
35193--- a/arch/x86/platform/intel-mid/mrfl.c
35194+++ b/arch/x86/platform/intel-mid/mrfl.c
35195@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
35196 .arch_setup = tangier_arch_setup,
35197 };
35198
35199-void *get_tangier_ops(void)
35200+const void *get_tangier_ops(void)
35201 {
35202 return &tangier_ops;
35203 }
35204diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
35205index c9a0838..fae0977 100644
35206--- a/arch/x86/platform/intel-quark/imr_selftest.c
35207+++ b/arch/x86/platform/intel-quark/imr_selftest.c
35208@@ -54,7 +54,7 @@ static void __init imr_self_test_result(int res, const char *fmt, ...)
35209 */
35210 static void __init imr_self_test(void)
35211 {
35212- phys_addr_t base = virt_to_phys(&_text);
35213+ phys_addr_t base = virt_to_phys(ktla_ktva(_text));
35214 size_t size = virt_to_phys(&__end_rodata) - base;
35215 const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n";
35216 int ret;
35217diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
35218index d6ee929..3637cb5 100644
35219--- a/arch/x86/platform/olpc/olpc_dt.c
35220+++ b/arch/x86/platform/olpc/olpc_dt.c
35221@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
35222 return res;
35223 }
35224
35225-static struct of_pdt_ops prom_olpc_ops __initdata = {
35226+static struct of_pdt_ops prom_olpc_ops __initconst = {
35227 .nextprop = olpc_dt_nextprop,
35228 .getproplen = olpc_dt_getproplen,
35229 .getproperty = olpc_dt_getproperty,
35230diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
35231index 3e32ed5..cc0adc5 100644
35232--- a/arch/x86/power/cpu.c
35233+++ b/arch/x86/power/cpu.c
35234@@ -134,11 +134,8 @@ static void do_fpu_end(void)
35235 static void fix_processor_context(void)
35236 {
35237 int cpu = smp_processor_id();
35238- struct tss_struct *t = &per_cpu(init_tss, cpu);
35239-#ifdef CONFIG_X86_64
35240- struct desc_struct *desc = get_cpu_gdt_table(cpu);
35241- tss_desc tss;
35242-#endif
35243+ struct tss_struct *t = init_tss + cpu;
35244+
35245 set_tss_desc(cpu, t); /*
35246 * This just modifies memory; should not be
35247 * necessary. But... This is necessary, because
35248@@ -147,10 +144,6 @@ static void fix_processor_context(void)
35249 */
35250
35251 #ifdef CONFIG_X86_64
35252- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
35253- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
35254- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
35255-
35256 syscall_init(); /* This sets MSR_*STAR and related */
35257 #endif
35258 load_TR_desc(); /* This does ltr */
35259diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
35260index 0b7a63d..0d0f2c2 100644
35261--- a/arch/x86/realmode/init.c
35262+++ b/arch/x86/realmode/init.c
35263@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
35264 __va(real_mode_header->trampoline_header);
35265
35266 #ifdef CONFIG_X86_32
35267- trampoline_header->start = __pa_symbol(startup_32_smp);
35268+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
35269+
35270+#ifdef CONFIG_PAX_KERNEXEC
35271+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
35272+#endif
35273+
35274+ trampoline_header->boot_cs = __BOOT_CS;
35275 trampoline_header->gdt_limit = __BOOT_DS + 7;
35276 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
35277 #else
35278@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
35279 *trampoline_cr4_features = __read_cr4();
35280
35281 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
35282- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
35283+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
35284 trampoline_pgd[511] = init_level4_pgt[511].pgd;
35285 #endif
35286 }
35287diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
35288index 2730d77..2e4cd19 100644
35289--- a/arch/x86/realmode/rm/Makefile
35290+++ b/arch/x86/realmode/rm/Makefile
35291@@ -68,5 +68,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
35292
35293 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
35294 -I$(srctree)/arch/x86/boot
35295+ifdef CONSTIFY_PLUGIN
35296+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
35297+endif
35298 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
35299 GCOV_PROFILE := n
35300diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
35301index a28221d..93c40f1 100644
35302--- a/arch/x86/realmode/rm/header.S
35303+++ b/arch/x86/realmode/rm/header.S
35304@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
35305 #endif
35306 /* APM/BIOS reboot */
35307 .long pa_machine_real_restart_asm
35308-#ifdef CONFIG_X86_64
35309+#ifdef CONFIG_X86_32
35310+ .long __KERNEL_CS
35311+#else
35312 .long __KERNEL32_CS
35313 #endif
35314 END(real_mode_header)
35315diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S
35316index d66c607..3def845 100644
35317--- a/arch/x86/realmode/rm/reboot.S
35318+++ b/arch/x86/realmode/rm/reboot.S
35319@@ -27,6 +27,10 @@ ENTRY(machine_real_restart_asm)
35320 lgdtl pa_tr_gdt
35321
35322 /* Disable paging to drop us out of long mode */
35323+ movl %cr4, %eax
35324+ andl $~X86_CR4_PCIDE, %eax
35325+ movl %eax, %cr4
35326+
35327 movl %cr0, %eax
35328 andl $~X86_CR0_PG, %eax
35329 movl %eax, %cr0
35330diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
35331index 48ddd76..c26749f 100644
35332--- a/arch/x86/realmode/rm/trampoline_32.S
35333+++ b/arch/x86/realmode/rm/trampoline_32.S
35334@@ -24,6 +24,12 @@
35335 #include <asm/page_types.h>
35336 #include "realmode.h"
35337
35338+#ifdef CONFIG_PAX_KERNEXEC
35339+#define ta(X) (X)
35340+#else
35341+#define ta(X) (pa_ ## X)
35342+#endif
35343+
35344 .text
35345 .code16
35346
35347@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
35348
35349 cli # We should be safe anyway
35350
35351- movl tr_start, %eax # where we need to go
35352-
35353 movl $0xA5A5A5A5, trampoline_status
35354 # write marker for master knows we're running
35355
35356@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
35357 movw $1, %dx # protected mode (PE) bit
35358 lmsw %dx # into protected mode
35359
35360- ljmpl $__BOOT_CS, $pa_startup_32
35361+ ljmpl *(trampoline_header)
35362
35363 .section ".text32","ax"
35364 .code32
35365@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
35366 .balign 8
35367 GLOBAL(trampoline_header)
35368 tr_start: .space 4
35369- tr_gdt_pad: .space 2
35370+ tr_boot_cs: .space 2
35371 tr_gdt: .space 6
35372 END(trampoline_header)
35373
35374diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
35375index dac7b20..72dbaca 100644
35376--- a/arch/x86/realmode/rm/trampoline_64.S
35377+++ b/arch/x86/realmode/rm/trampoline_64.S
35378@@ -93,6 +93,7 @@ ENTRY(startup_32)
35379 movl %edx, %gs
35380
35381 movl pa_tr_cr4, %eax
35382+ andl $~X86_CR4_PCIDE, %eax
35383 movl %eax, %cr4 # Enable PAE mode
35384
35385 # Setup trampoline 4 level pagetables
35386@@ -106,7 +107,7 @@ ENTRY(startup_32)
35387 wrmsr
35388
35389 # Enable paging and in turn activate Long Mode
35390- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
35391+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
35392 movl %eax, %cr0
35393
35394 /*
35395diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
35396index 9e7e147..25a4158 100644
35397--- a/arch/x86/realmode/rm/wakeup_asm.S
35398+++ b/arch/x86/realmode/rm/wakeup_asm.S
35399@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
35400 lgdtl pmode_gdt
35401
35402 /* This really couldn't... */
35403- movl pmode_entry, %eax
35404 movl pmode_cr0, %ecx
35405 movl %ecx, %cr0
35406- ljmpl $__KERNEL_CS, $pa_startup_32
35407- /* -> jmp *%eax in trampoline_32.S */
35408+
35409+ ljmpl *pmode_entry
35410 #else
35411 jmp trampoline_start
35412 #endif
35413diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
35414index 604a37e..e49702a 100644
35415--- a/arch/x86/tools/Makefile
35416+++ b/arch/x86/tools/Makefile
35417@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
35418
35419 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
35420
35421-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
35422+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
35423 hostprogs-y += relocs
35424 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
35425 PHONY += relocs
35426diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
35427index 0c2fae8..88036b7 100644
35428--- a/arch/x86/tools/relocs.c
35429+++ b/arch/x86/tools/relocs.c
35430@@ -1,5 +1,7 @@
35431 /* This is included from relocs_32/64.c */
35432
35433+#include "../../../include/generated/autoconf.h"
35434+
35435 #define ElfW(type) _ElfW(ELF_BITS, type)
35436 #define _ElfW(bits, type) __ElfW(bits, type)
35437 #define __ElfW(bits, type) Elf##bits##_##type
35438@@ -11,6 +13,7 @@
35439 #define Elf_Sym ElfW(Sym)
35440
35441 static Elf_Ehdr ehdr;
35442+static Elf_Phdr *phdr;
35443
35444 struct relocs {
35445 uint32_t *offset;
35446@@ -386,9 +389,39 @@ static void read_ehdr(FILE *fp)
35447 }
35448 }
35449
35450+static void read_phdrs(FILE *fp)
35451+{
35452+ unsigned int i;
35453+
35454+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
35455+ if (!phdr) {
35456+ die("Unable to allocate %d program headers\n",
35457+ ehdr.e_phnum);
35458+ }
35459+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
35460+ die("Seek to %d failed: %s\n",
35461+ ehdr.e_phoff, strerror(errno));
35462+ }
35463+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
35464+ die("Cannot read ELF program headers: %s\n",
35465+ strerror(errno));
35466+ }
35467+ for(i = 0; i < ehdr.e_phnum; i++) {
35468+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
35469+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
35470+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
35471+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
35472+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
35473+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
35474+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
35475+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
35476+ }
35477+
35478+}
35479+
35480 static void read_shdrs(FILE *fp)
35481 {
35482- int i;
35483+ unsigned int i;
35484 Elf_Shdr shdr;
35485
35486 secs = calloc(ehdr.e_shnum, sizeof(struct section));
35487@@ -423,7 +456,7 @@ static void read_shdrs(FILE *fp)
35488
35489 static void read_strtabs(FILE *fp)
35490 {
35491- int i;
35492+ unsigned int i;
35493 for (i = 0; i < ehdr.e_shnum; i++) {
35494 struct section *sec = &secs[i];
35495 if (sec->shdr.sh_type != SHT_STRTAB) {
35496@@ -448,7 +481,7 @@ static void read_strtabs(FILE *fp)
35497
35498 static void read_symtabs(FILE *fp)
35499 {
35500- int i,j;
35501+ unsigned int i,j;
35502 for (i = 0; i < ehdr.e_shnum; i++) {
35503 struct section *sec = &secs[i];
35504 if (sec->shdr.sh_type != SHT_SYMTAB) {
35505@@ -479,9 +512,11 @@ static void read_symtabs(FILE *fp)
35506 }
35507
35508
35509-static void read_relocs(FILE *fp)
35510+static void read_relocs(FILE *fp, int use_real_mode)
35511 {
35512- int i,j;
35513+ unsigned int i,j;
35514+ uint32_t base;
35515+
35516 for (i = 0; i < ehdr.e_shnum; i++) {
35517 struct section *sec = &secs[i];
35518 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35519@@ -501,9 +536,22 @@ static void read_relocs(FILE *fp)
35520 die("Cannot read symbol table: %s\n",
35521 strerror(errno));
35522 }
35523+ base = 0;
35524+
35525+#ifdef CONFIG_X86_32
35526+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
35527+ if (phdr[j].p_type != PT_LOAD )
35528+ continue;
35529+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
35530+ continue;
35531+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
35532+ break;
35533+ }
35534+#endif
35535+
35536 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
35537 Elf_Rel *rel = &sec->reltab[j];
35538- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
35539+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
35540 rel->r_info = elf_xword_to_cpu(rel->r_info);
35541 #if (SHT_REL_TYPE == SHT_RELA)
35542 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
35543@@ -515,7 +563,7 @@ static void read_relocs(FILE *fp)
35544
35545 static void print_absolute_symbols(void)
35546 {
35547- int i;
35548+ unsigned int i;
35549 const char *format;
35550
35551 if (ELF_BITS == 64)
35552@@ -528,7 +576,7 @@ static void print_absolute_symbols(void)
35553 for (i = 0; i < ehdr.e_shnum; i++) {
35554 struct section *sec = &secs[i];
35555 char *sym_strtab;
35556- int j;
35557+ unsigned int j;
35558
35559 if (sec->shdr.sh_type != SHT_SYMTAB) {
35560 continue;
35561@@ -555,7 +603,7 @@ static void print_absolute_symbols(void)
35562
35563 static void print_absolute_relocs(void)
35564 {
35565- int i, printed = 0;
35566+ unsigned int i, printed = 0;
35567 const char *format;
35568
35569 if (ELF_BITS == 64)
35570@@ -568,7 +616,7 @@ static void print_absolute_relocs(void)
35571 struct section *sec_applies, *sec_symtab;
35572 char *sym_strtab;
35573 Elf_Sym *sh_symtab;
35574- int j;
35575+ unsigned int j;
35576 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35577 continue;
35578 }
35579@@ -645,13 +693,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
35580 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
35581 Elf_Sym *sym, const char *symname))
35582 {
35583- int i;
35584+ unsigned int i;
35585 /* Walk through the relocations */
35586 for (i = 0; i < ehdr.e_shnum; i++) {
35587 char *sym_strtab;
35588 Elf_Sym *sh_symtab;
35589 struct section *sec_applies, *sec_symtab;
35590- int j;
35591+ unsigned int j;
35592 struct section *sec = &secs[i];
35593
35594 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35595@@ -830,6 +878,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35596 {
35597 unsigned r_type = ELF32_R_TYPE(rel->r_info);
35598 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
35599+ char *sym_strtab = sec->link->link->strtab;
35600+
35601+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
35602+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
35603+ return 0;
35604+
35605+#ifdef CONFIG_PAX_KERNEXEC
35606+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
35607+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
35608+ return 0;
35609+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
35610+ return 0;
35611+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
35612+ return 0;
35613+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
35614+ return 0;
35615+#endif
35616
35617 switch (r_type) {
35618 case R_386_NONE:
35619@@ -968,7 +1033,7 @@ static int write32_as_text(uint32_t v, FILE *f)
35620
35621 static void emit_relocs(int as_text, int use_real_mode)
35622 {
35623- int i;
35624+ unsigned int i;
35625 int (*write_reloc)(uint32_t, FILE *) = write32;
35626 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35627 const char *symname);
35628@@ -1078,10 +1143,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
35629 {
35630 regex_init(use_real_mode);
35631 read_ehdr(fp);
35632+ read_phdrs(fp);
35633 read_shdrs(fp);
35634 read_strtabs(fp);
35635 read_symtabs(fp);
35636- read_relocs(fp);
35637+ read_relocs(fp, use_real_mode);
35638 if (ELF_BITS == 64)
35639 percpu_init();
35640 if (show_absolute_syms) {
35641diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
35642index f40281e..92728c9 100644
35643--- a/arch/x86/um/mem_32.c
35644+++ b/arch/x86/um/mem_32.c
35645@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
35646 gate_vma.vm_start = FIXADDR_USER_START;
35647 gate_vma.vm_end = FIXADDR_USER_END;
35648 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
35649- gate_vma.vm_page_prot = __P101;
35650+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
35651
35652 return 0;
35653 }
35654diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
35655index 80ffa5b..a33bd15 100644
35656--- a/arch/x86/um/tls_32.c
35657+++ b/arch/x86/um/tls_32.c
35658@@ -260,7 +260,7 @@ out:
35659 if (unlikely(task == current &&
35660 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
35661 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
35662- "without flushed TLS.", current->pid);
35663+ "without flushed TLS.", task_pid_nr(current));
35664 }
35665
35666 return 0;
35667diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
35668index 8533c96..ff98c52 100644
35669--- a/arch/x86/vdso/Makefile
35670+++ b/arch/x86/vdso/Makefile
35671@@ -175,7 +175,7 @@ quiet_cmd_vdso = VDSO $@
35672 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
35673 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
35674
35675-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35676+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35677 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
35678 GCOV_PROFILE := n
35679
35680diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
35681index 0224987..c7d65a5 100644
35682--- a/arch/x86/vdso/vdso2c.h
35683+++ b/arch/x86/vdso/vdso2c.h
35684@@ -12,7 +12,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
35685 unsigned long load_size = -1; /* Work around bogus warning */
35686 unsigned long mapping_size;
35687 ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
35688- int i;
35689+ unsigned int i;
35690 unsigned long j;
35691 ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
35692 *alt_sec = NULL;
35693diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
35694index e904c27..b9eaa03 100644
35695--- a/arch/x86/vdso/vdso32-setup.c
35696+++ b/arch/x86/vdso/vdso32-setup.c
35697@@ -14,6 +14,7 @@
35698 #include <asm/cpufeature.h>
35699 #include <asm/processor.h>
35700 #include <asm/vdso.h>
35701+#include <asm/mman.h>
35702
35703 #ifdef CONFIG_COMPAT_VDSO
35704 #define VDSO_DEFAULT 0
35705diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
35706index 1c9f750..cfddb1a 100644
35707--- a/arch/x86/vdso/vma.c
35708+++ b/arch/x86/vdso/vma.c
35709@@ -19,10 +19,7 @@
35710 #include <asm/page.h>
35711 #include <asm/hpet.h>
35712 #include <asm/desc.h>
35713-
35714-#if defined(CONFIG_X86_64)
35715-unsigned int __read_mostly vdso64_enabled = 1;
35716-#endif
35717+#include <asm/mman.h>
35718
35719 void __init init_vdso_image(const struct vdso_image *image)
35720 {
35721@@ -101,6 +98,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35722 .pages = no_pages,
35723 };
35724
35725+#ifdef CONFIG_PAX_RANDMMAP
35726+ if (mm->pax_flags & MF_PAX_RANDMMAP)
35727+ calculate_addr = false;
35728+#endif
35729+
35730 if (calculate_addr) {
35731 addr = vdso_addr(current->mm->start_stack,
35732 image->size - image->sym_vvar_start);
35733@@ -111,14 +113,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35734 down_write(&mm->mmap_sem);
35735
35736 addr = get_unmapped_area(NULL, addr,
35737- image->size - image->sym_vvar_start, 0, 0);
35738+ image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
35739 if (IS_ERR_VALUE(addr)) {
35740 ret = addr;
35741 goto up_fail;
35742 }
35743
35744 text_start = addr - image->sym_vvar_start;
35745- current->mm->context.vdso = (void __user *)text_start;
35746+ mm->context.vdso = text_start;
35747
35748 /*
35749 * MAYWRITE to allow gdb to COW and set breakpoints
35750@@ -163,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35751 hpet_address >> PAGE_SHIFT,
35752 PAGE_SIZE,
35753 pgprot_noncached(PAGE_READONLY));
35754-
35755- if (ret)
35756- goto up_fail;
35757 }
35758 #endif
35759
35760 up_fail:
35761 if (ret)
35762- current->mm->context.vdso = NULL;
35763+ current->mm->context.vdso = 0;
35764
35765 up_write(&mm->mmap_sem);
35766 return ret;
35767@@ -191,8 +190,8 @@ static int load_vdso32(void)
35768
35769 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
35770 current_thread_info()->sysenter_return =
35771- current->mm->context.vdso +
35772- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
35773+ (void __force_user *)(current->mm->context.vdso +
35774+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
35775
35776 return 0;
35777 }
35778@@ -201,9 +200,6 @@ static int load_vdso32(void)
35779 #ifdef CONFIG_X86_64
35780 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35781 {
35782- if (!vdso64_enabled)
35783- return 0;
35784-
35785 return map_vdso(&vdso_image_64, true);
35786 }
35787
35788@@ -212,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
35789 int uses_interp)
35790 {
35791 #ifdef CONFIG_X86_X32_ABI
35792- if (test_thread_flag(TIF_X32)) {
35793- if (!vdso64_enabled)
35794- return 0;
35795-
35796+ if (test_thread_flag(TIF_X32))
35797 return map_vdso(&vdso_image_x32, true);
35798- }
35799 #endif
35800
35801 return load_vdso32();
35802@@ -231,15 +223,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35803 #endif
35804
35805 #ifdef CONFIG_X86_64
35806-static __init int vdso_setup(char *s)
35807-{
35808- vdso64_enabled = simple_strtoul(s, NULL, 0);
35809- return 0;
35810-}
35811-__setup("vdso=", vdso_setup);
35812-#endif
35813-
35814-#ifdef CONFIG_X86_64
35815 static void vgetcpu_cpu_init(void *arg)
35816 {
35817 int cpu = smp_processor_id();
35818diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
35819index e88fda8..76ce7ce 100644
35820--- a/arch/x86/xen/Kconfig
35821+++ b/arch/x86/xen/Kconfig
35822@@ -9,6 +9,7 @@ config XEN
35823 select XEN_HAVE_PVMMU
35824 depends on X86_64 || (X86_32 && X86_PAE)
35825 depends on X86_TSC
35826+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
35827 help
35828 This is the Linux Xen port. Enabling this will allow the
35829 kernel to boot in a paravirtualized environment under the
35830diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
35831index 5240f56..0c12163 100644
35832--- a/arch/x86/xen/enlighten.c
35833+++ b/arch/x86/xen/enlighten.c
35834@@ -125,8 +125,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
35835
35836 struct shared_info xen_dummy_shared_info;
35837
35838-void *xen_initial_gdt;
35839-
35840 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
35841 __read_mostly int xen_have_vector_callback;
35842 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
35843@@ -544,8 +542,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
35844 {
35845 unsigned long va = dtr->address;
35846 unsigned int size = dtr->size + 1;
35847- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35848- unsigned long frames[pages];
35849+ unsigned long frames[65536 / PAGE_SIZE];
35850 int f;
35851
35852 /*
35853@@ -593,8 +590,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35854 {
35855 unsigned long va = dtr->address;
35856 unsigned int size = dtr->size + 1;
35857- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35858- unsigned long frames[pages];
35859+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
35860 int f;
35861
35862 /*
35863@@ -602,7 +598,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35864 * 8-byte entries, or 16 4k pages..
35865 */
35866
35867- BUG_ON(size > 65536);
35868+ BUG_ON(size > GDT_SIZE);
35869 BUG_ON(va & ~PAGE_MASK);
35870
35871 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
35872@@ -991,7 +987,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
35873 return 0;
35874 }
35875
35876-static void set_xen_basic_apic_ops(void)
35877+static void __init set_xen_basic_apic_ops(void)
35878 {
35879 apic->read = xen_apic_read;
35880 apic->write = xen_apic_write;
35881@@ -1308,30 +1304,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
35882 #endif
35883 };
35884
35885-static void xen_reboot(int reason)
35886+static __noreturn void xen_reboot(int reason)
35887 {
35888 struct sched_shutdown r = { .reason = reason };
35889
35890- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
35891- BUG();
35892+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
35893+ BUG();
35894 }
35895
35896-static void xen_restart(char *msg)
35897+static __noreturn void xen_restart(char *msg)
35898 {
35899 xen_reboot(SHUTDOWN_reboot);
35900 }
35901
35902-static void xen_emergency_restart(void)
35903+static __noreturn void xen_emergency_restart(void)
35904 {
35905 xen_reboot(SHUTDOWN_reboot);
35906 }
35907
35908-static void xen_machine_halt(void)
35909+static __noreturn void xen_machine_halt(void)
35910 {
35911 xen_reboot(SHUTDOWN_poweroff);
35912 }
35913
35914-static void xen_machine_power_off(void)
35915+static __noreturn void xen_machine_power_off(void)
35916 {
35917 if (pm_power_off)
35918 pm_power_off();
35919@@ -1484,8 +1480,11 @@ static void __ref xen_setup_gdt(int cpu)
35920 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
35921 pv_cpu_ops.load_gdt = xen_load_gdt_boot;
35922
35923- setup_stack_canary_segment(0);
35924- switch_to_new_gdt(0);
35925+ setup_stack_canary_segment(cpu);
35926+#ifdef CONFIG_X86_64
35927+ load_percpu_segment(cpu);
35928+#endif
35929+ switch_to_new_gdt(cpu);
35930
35931 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
35932 pv_cpu_ops.load_gdt = xen_load_gdt;
35933@@ -1600,7 +1599,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
35934 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
35935
35936 /* Work out if we support NX */
35937- x86_configure_nx();
35938+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
35939+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
35940+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
35941+ unsigned l, h;
35942+
35943+ __supported_pte_mask |= _PAGE_NX;
35944+ rdmsr(MSR_EFER, l, h);
35945+ l |= EFER_NX;
35946+ wrmsr(MSR_EFER, l, h);
35947+ }
35948+#endif
35949
35950 /* Get mfn list */
35951 xen_build_dynamic_phys_to_machine();
35952@@ -1628,13 +1637,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
35953
35954 machine_ops = xen_machine_ops;
35955
35956- /*
35957- * The only reliable way to retain the initial address of the
35958- * percpu gdt_page is to remember it here, so we can go and
35959- * mark it RW later, when the initial percpu area is freed.
35960- */
35961- xen_initial_gdt = &per_cpu(gdt_page, 0);
35962-
35963 xen_smp_init();
35964
35965 #ifdef CONFIG_ACPI_NUMA
35966diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
35967index adca9e2..cdba9d1 100644
35968--- a/arch/x86/xen/mmu.c
35969+++ b/arch/x86/xen/mmu.c
35970@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
35971 return val;
35972 }
35973
35974-static pteval_t pte_pfn_to_mfn(pteval_t val)
35975+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
35976 {
35977 if (val & _PAGE_PRESENT) {
35978 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
35979@@ -1835,7 +1835,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35980 * L3_k[511] -> level2_fixmap_pgt */
35981 convert_pfn_mfn(level3_kernel_pgt);
35982
35983+ convert_pfn_mfn(level3_vmalloc_start_pgt);
35984+ convert_pfn_mfn(level3_vmalloc_end_pgt);
35985+ convert_pfn_mfn(level3_vmemmap_pgt);
35986 /* L3_k[511][506] -> level1_fixmap_pgt */
35987+ /* L3_k[511][507] -> level1_vsyscall_pgt */
35988 convert_pfn_mfn(level2_fixmap_pgt);
35989 }
35990 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
35991@@ -1860,11 +1864,18 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35992 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
35993 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
35994 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
35995+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
35996+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
35997+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
35998 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
35999 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
36000+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
36001 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
36002 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
36003- set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
36004+ set_page_prot(level1_fixmap_pgt[0], PAGE_KERNEL_RO);
36005+ set_page_prot(level1_fixmap_pgt[1], PAGE_KERNEL_RO);
36006+ set_page_prot(level1_fixmap_pgt[2], PAGE_KERNEL_RO);
36007+ set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO);
36008
36009 /* Pin down new L4 */
36010 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
36011@@ -2048,6 +2059,7 @@ static void __init xen_post_allocator_init(void)
36012 pv_mmu_ops.set_pud = xen_set_pud;
36013 #if PAGETABLE_LEVELS == 4
36014 pv_mmu_ops.set_pgd = xen_set_pgd;
36015+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
36016 #endif
36017
36018 /* This will work as long as patching hasn't happened yet
36019@@ -2126,6 +2138,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
36020 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
36021 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
36022 .set_pgd = xen_set_pgd_hyper,
36023+ .set_pgd_batched = xen_set_pgd_hyper,
36024
36025 .alloc_pud = xen_alloc_pmd_init,
36026 .release_pud = xen_release_pmd_init,
36027diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
36028index 08e8489..b1e182f 100644
36029--- a/arch/x86/xen/smp.c
36030+++ b/arch/x86/xen/smp.c
36031@@ -288,17 +288,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
36032
36033 if (xen_pv_domain()) {
36034 if (!xen_feature(XENFEAT_writable_page_tables))
36035- /* We've switched to the "real" per-cpu gdt, so make
36036- * sure the old memory can be recycled. */
36037- make_lowmem_page_readwrite(xen_initial_gdt);
36038-
36039 #ifdef CONFIG_X86_32
36040 /*
36041 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
36042 * expects __USER_DS
36043 */
36044- loadsegment(ds, __USER_DS);
36045- loadsegment(es, __USER_DS);
36046+ loadsegment(ds, __KERNEL_DS);
36047+ loadsegment(es, __KERNEL_DS);
36048 #endif
36049
36050 xen_filter_cpu_maps();
36051@@ -379,7 +375,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
36052 #ifdef CONFIG_X86_32
36053 /* Note: PVH is not yet supported on x86_32. */
36054 ctxt->user_regs.fs = __KERNEL_PERCPU;
36055- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
36056+ savesegment(gs, ctxt->user_regs.gs);
36057 #endif
36058 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
36059
36060@@ -387,8 +383,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
36061 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
36062 ctxt->flags = VGCF_IN_KERNEL;
36063 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
36064- ctxt->user_regs.ds = __USER_DS;
36065- ctxt->user_regs.es = __USER_DS;
36066+ ctxt->user_regs.ds = __KERNEL_DS;
36067+ ctxt->user_regs.es = __KERNEL_DS;
36068 ctxt->user_regs.ss = __KERNEL_DS;
36069
36070 xen_copy_trap_info(ctxt->trap_ctxt);
36071@@ -446,14 +442,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
36072 int rc;
36073
36074 per_cpu(current_task, cpu) = idle;
36075+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
36076 #ifdef CONFIG_X86_32
36077 irq_ctx_init(cpu);
36078 #else
36079 clear_tsk_thread_flag(idle, TIF_FORK);
36080 #endif
36081- per_cpu(kernel_stack, cpu) =
36082- (unsigned long)task_stack_page(idle) -
36083- KERNEL_STACK_OFFSET + THREAD_SIZE;
36084+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
36085
36086 xen_setup_runstate_info(cpu);
36087 xen_setup_timer(cpu);
36088@@ -732,7 +727,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
36089
36090 void __init xen_smp_init(void)
36091 {
36092- smp_ops = xen_smp_ops;
36093+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
36094 xen_fill_possible_map();
36095 }
36096
36097diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
36098index fd92a64..1f72641 100644
36099--- a/arch/x86/xen/xen-asm_32.S
36100+++ b/arch/x86/xen/xen-asm_32.S
36101@@ -99,7 +99,7 @@ ENTRY(xen_iret)
36102 pushw %fs
36103 movl $(__KERNEL_PERCPU), %eax
36104 movl %eax, %fs
36105- movl %fs:xen_vcpu, %eax
36106+ mov PER_CPU_VAR(xen_vcpu), %eax
36107 POP_FS
36108 #else
36109 movl %ss:xen_vcpu, %eax
36110diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
36111index 674b2225..f1f5dc1 100644
36112--- a/arch/x86/xen/xen-head.S
36113+++ b/arch/x86/xen/xen-head.S
36114@@ -39,6 +39,17 @@ ENTRY(startup_xen)
36115 #ifdef CONFIG_X86_32
36116 mov %esi,xen_start_info
36117 mov $init_thread_union+THREAD_SIZE,%esp
36118+#ifdef CONFIG_SMP
36119+ movl $cpu_gdt_table,%edi
36120+ movl $__per_cpu_load,%eax
36121+ movw %ax,__KERNEL_PERCPU + 2(%edi)
36122+ rorl $16,%eax
36123+ movb %al,__KERNEL_PERCPU + 4(%edi)
36124+ movb %ah,__KERNEL_PERCPU + 7(%edi)
36125+ movl $__per_cpu_end - 1,%eax
36126+ subl $__per_cpu_start,%eax
36127+ movw %ax,__KERNEL_PERCPU + 0(%edi)
36128+#endif
36129 #else
36130 mov %rsi,xen_start_info
36131 mov $init_thread_union+THREAD_SIZE,%rsp
36132diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
36133index 9e195c6..523ed36 100644
36134--- a/arch/x86/xen/xen-ops.h
36135+++ b/arch/x86/xen/xen-ops.h
36136@@ -16,8 +16,6 @@ void xen_syscall_target(void);
36137 void xen_syscall32_target(void);
36138 #endif
36139
36140-extern void *xen_initial_gdt;
36141-
36142 struct trap_info;
36143 void xen_copy_trap_info(struct trap_info *traps);
36144
36145diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
36146index 525bd3d..ef888b1 100644
36147--- a/arch/xtensa/variants/dc232b/include/variant/core.h
36148+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
36149@@ -119,9 +119,9 @@
36150 ----------------------------------------------------------------------*/
36151
36152 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
36153-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
36154 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
36155 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
36156+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36157
36158 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
36159 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
36160diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
36161index 2f33760..835e50a 100644
36162--- a/arch/xtensa/variants/fsf/include/variant/core.h
36163+++ b/arch/xtensa/variants/fsf/include/variant/core.h
36164@@ -11,6 +11,7 @@
36165 #ifndef _XTENSA_CORE_H
36166 #define _XTENSA_CORE_H
36167
36168+#include <linux/const.h>
36169
36170 /****************************************************************************
36171 Parameters Useful for Any Code, USER or PRIVILEGED
36172@@ -112,9 +113,9 @@
36173 ----------------------------------------------------------------------*/
36174
36175 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
36176-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
36177 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
36178 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
36179+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36180
36181 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
36182 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
36183diff --git a/block/bio.c b/block/bio.c
36184index f66a4ea..73ddf55 100644
36185--- a/block/bio.c
36186+++ b/block/bio.c
36187@@ -1172,7 +1172,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
36188 /*
36189 * Overflow, abort
36190 */
36191- if (end < start)
36192+ if (end < start || end - start > INT_MAX - nr_pages)
36193 return ERR_PTR(-EINVAL);
36194
36195 nr_pages += end - start;
36196@@ -1297,7 +1297,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
36197 /*
36198 * Overflow, abort
36199 */
36200- if (end < start)
36201+ if (end < start || end - start > INT_MAX - nr_pages)
36202 return ERR_PTR(-EINVAL);
36203
36204 nr_pages += end - start;
36205diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
36206index 0736729..2ec3b48 100644
36207--- a/block/blk-iopoll.c
36208+++ b/block/blk-iopoll.c
36209@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
36210 }
36211 EXPORT_SYMBOL(blk_iopoll_complete);
36212
36213-static void blk_iopoll_softirq(struct softirq_action *h)
36214+static __latent_entropy void blk_iopoll_softirq(void)
36215 {
36216 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
36217 int rearm = 0, budget = blk_iopoll_budget;
36218diff --git a/block/blk-map.c b/block/blk-map.c
36219index b8d2725..08c52b0 100644
36220--- a/block/blk-map.c
36221+++ b/block/blk-map.c
36222@@ -192,7 +192,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
36223 if (!len || !kbuf)
36224 return -EINVAL;
36225
36226- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
36227+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
36228 if (do_copy)
36229 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
36230 else
36231diff --git a/block/blk-softirq.c b/block/blk-softirq.c
36232index 53b1737..08177d2e 100644
36233--- a/block/blk-softirq.c
36234+++ b/block/blk-softirq.c
36235@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
36236 * Softirq action handler - move entries to local list and loop over them
36237 * while passing them to the queue registered handler.
36238 */
36239-static void blk_done_softirq(struct softirq_action *h)
36240+static __latent_entropy void blk_done_softirq(void)
36241 {
36242 struct list_head *cpu_list, local_list;
36243
36244diff --git a/block/bsg.c b/block/bsg.c
36245index d214e92..9649863 100644
36246--- a/block/bsg.c
36247+++ b/block/bsg.c
36248@@ -140,16 +140,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
36249 struct sg_io_v4 *hdr, struct bsg_device *bd,
36250 fmode_t has_write_perm)
36251 {
36252+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36253+ unsigned char *cmdptr;
36254+
36255 if (hdr->request_len > BLK_MAX_CDB) {
36256 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
36257 if (!rq->cmd)
36258 return -ENOMEM;
36259- }
36260+ cmdptr = rq->cmd;
36261+ } else
36262+ cmdptr = tmpcmd;
36263
36264- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
36265+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
36266 hdr->request_len))
36267 return -EFAULT;
36268
36269+ if (cmdptr != rq->cmd)
36270+ memcpy(rq->cmd, cmdptr, hdr->request_len);
36271+
36272 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
36273 if (blk_verify_command(rq->cmd, has_write_perm))
36274 return -EPERM;
36275diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
36276index f678c73..f35aa18 100644
36277--- a/block/compat_ioctl.c
36278+++ b/block/compat_ioctl.c
36279@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
36280 cgc = compat_alloc_user_space(sizeof(*cgc));
36281 cgc32 = compat_ptr(arg);
36282
36283- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
36284+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
36285 get_user(data, &cgc32->buffer) ||
36286 put_user(compat_ptr(data), &cgc->buffer) ||
36287 copy_in_user(&cgc->buflen, &cgc32->buflen,
36288@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
36289 err |= __get_user(f->spec1, &uf->spec1);
36290 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
36291 err |= __get_user(name, &uf->name);
36292- f->name = compat_ptr(name);
36293+ f->name = (void __force_kernel *)compat_ptr(name);
36294 if (err) {
36295 err = -EFAULT;
36296 goto out;
36297diff --git a/block/genhd.c b/block/genhd.c
36298index ea982ea..86e0f9e 100644
36299--- a/block/genhd.c
36300+++ b/block/genhd.c
36301@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
36302
36303 /*
36304 * Register device numbers dev..(dev+range-1)
36305- * range must be nonzero
36306+ * Noop if @range is zero.
36307 * The hash chain is sorted on range, so that subranges can override.
36308 */
36309 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
36310 struct kobject *(*probe)(dev_t, int *, void *),
36311 int (*lock)(dev_t, void *), void *data)
36312 {
36313- kobj_map(bdev_map, devt, range, module, probe, lock, data);
36314+ if (range)
36315+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
36316 }
36317
36318 EXPORT_SYMBOL(blk_register_region);
36319
36320+/* undo blk_register_region(), noop if @range is zero */
36321 void blk_unregister_region(dev_t devt, unsigned long range)
36322 {
36323- kobj_unmap(bdev_map, devt, range);
36324+ if (range)
36325+ kobj_unmap(bdev_map, devt, range);
36326 }
36327
36328 EXPORT_SYMBOL(blk_unregister_region);
36329diff --git a/block/partitions/efi.c b/block/partitions/efi.c
36330index 26cb624..a49c3a5 100644
36331--- a/block/partitions/efi.c
36332+++ b/block/partitions/efi.c
36333@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
36334 if (!gpt)
36335 return NULL;
36336
36337+ if (!le32_to_cpu(gpt->num_partition_entries))
36338+ return NULL;
36339+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
36340+ if (!pte)
36341+ return NULL;
36342+
36343 count = le32_to_cpu(gpt->num_partition_entries) *
36344 le32_to_cpu(gpt->sizeof_partition_entry);
36345- if (!count)
36346- return NULL;
36347- pte = kmalloc(count, GFP_KERNEL);
36348- if (!pte)
36349- return NULL;
36350-
36351 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
36352 (u8 *) pte, count) < count) {
36353 kfree(pte);
36354diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
36355index e1f71c3..02d295a 100644
36356--- a/block/scsi_ioctl.c
36357+++ b/block/scsi_ioctl.c
36358@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
36359 return put_user(0, p);
36360 }
36361
36362-static int sg_get_timeout(struct request_queue *q)
36363+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
36364 {
36365 return jiffies_to_clock_t(q->sg_timeout);
36366 }
36367@@ -227,8 +227,20 @@ EXPORT_SYMBOL(blk_verify_command);
36368 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
36369 struct sg_io_hdr *hdr, fmode_t mode)
36370 {
36371- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
36372+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36373+ unsigned char *cmdptr;
36374+
36375+ if (rq->cmd != rq->__cmd)
36376+ cmdptr = rq->cmd;
36377+ else
36378+ cmdptr = tmpcmd;
36379+
36380+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
36381 return -EFAULT;
36382+
36383+ if (cmdptr != rq->cmd)
36384+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
36385+
36386 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
36387 return -EPERM;
36388
36389@@ -422,6 +434,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36390 int err;
36391 unsigned int in_len, out_len, bytes, opcode, cmdlen;
36392 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
36393+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36394+ unsigned char *cmdptr;
36395
36396 if (!sic)
36397 return -EINVAL;
36398@@ -460,9 +474,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36399 */
36400 err = -EFAULT;
36401 rq->cmd_len = cmdlen;
36402- if (copy_from_user(rq->cmd, sic->data, cmdlen))
36403+
36404+ if (rq->cmd != rq->__cmd)
36405+ cmdptr = rq->cmd;
36406+ else
36407+ cmdptr = tmpcmd;
36408+
36409+ if (copy_from_user(cmdptr, sic->data, cmdlen))
36410 goto error;
36411
36412+ if (rq->cmd != cmdptr)
36413+ memcpy(rq->cmd, cmdptr, cmdlen);
36414+
36415 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
36416 goto error;
36417
36418diff --git a/crypto/cryptd.c b/crypto/cryptd.c
36419index 650afac1..f3307de 100644
36420--- a/crypto/cryptd.c
36421+++ b/crypto/cryptd.c
36422@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
36423
36424 struct cryptd_blkcipher_request_ctx {
36425 crypto_completion_t complete;
36426-};
36427+} __no_const;
36428
36429 struct cryptd_hash_ctx {
36430 struct crypto_shash *child;
36431@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
36432
36433 struct cryptd_aead_request_ctx {
36434 crypto_completion_t complete;
36435-};
36436+} __no_const;
36437
36438 static void cryptd_queue_worker(struct work_struct *work);
36439
36440diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
36441index c305d41..a96de79 100644
36442--- a/crypto/pcrypt.c
36443+++ b/crypto/pcrypt.c
36444@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
36445 int ret;
36446
36447 pinst->kobj.kset = pcrypt_kset;
36448- ret = kobject_add(&pinst->kobj, NULL, name);
36449+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
36450 if (!ret)
36451 kobject_uevent(&pinst->kobj, KOBJ_ADD);
36452
36453diff --git a/crypto/zlib.c b/crypto/zlib.c
36454index 0eefa9d..0fa3d29 100644
36455--- a/crypto/zlib.c
36456+++ b/crypto/zlib.c
36457@@ -95,10 +95,10 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params,
36458 zlib_comp_exit(ctx);
36459
36460 window_bits = tb[ZLIB_COMP_WINDOWBITS]
36461- ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS])
36462+ ? nla_get_s32(tb[ZLIB_COMP_WINDOWBITS])
36463 : MAX_WBITS;
36464 mem_level = tb[ZLIB_COMP_MEMLEVEL]
36465- ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL])
36466+ ? nla_get_s32(tb[ZLIB_COMP_MEMLEVEL])
36467 : DEF_MEM_LEVEL;
36468
36469 workspacesize = zlib_deflate_workspacesize(window_bits, mem_level);
36470diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
36471index 3b37676..898edfa 100644
36472--- a/drivers/acpi/acpica/hwxfsleep.c
36473+++ b/drivers/acpi/acpica/hwxfsleep.c
36474@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
36475 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
36476
36477 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
36478- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36479- acpi_hw_extended_sleep},
36480- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36481- acpi_hw_extended_wake_prep},
36482- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
36483+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36484+ .extended_function = acpi_hw_extended_sleep},
36485+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36486+ .extended_function = acpi_hw_extended_wake_prep},
36487+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
36488+ .extended_function = acpi_hw_extended_wake}
36489 };
36490
36491 /*
36492diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
36493index 16129c7..8b675cd 100644
36494--- a/drivers/acpi/apei/apei-internal.h
36495+++ b/drivers/acpi/apei/apei-internal.h
36496@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
36497 struct apei_exec_ins_type {
36498 u32 flags;
36499 apei_exec_ins_func_t run;
36500-};
36501+} __do_const;
36502
36503 struct apei_exec_context {
36504 u32 ip;
36505diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
36506index e82d097..0c855c1 100644
36507--- a/drivers/acpi/apei/ghes.c
36508+++ b/drivers/acpi/apei/ghes.c
36509@@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
36510 const struct acpi_hest_generic *generic,
36511 const struct acpi_hest_generic_status *estatus)
36512 {
36513- static atomic_t seqno;
36514+ static atomic_unchecked_t seqno;
36515 unsigned int curr_seqno;
36516 char pfx_seq[64];
36517
36518@@ -489,7 +489,7 @@ static void __ghes_print_estatus(const char *pfx,
36519 else
36520 pfx = KERN_ERR;
36521 }
36522- curr_seqno = atomic_inc_return(&seqno);
36523+ curr_seqno = atomic_inc_return_unchecked(&seqno);
36524 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
36525 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
36526 pfx_seq, generic->header.source_id);
36527diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
36528index a83e3c6..c3d617f 100644
36529--- a/drivers/acpi/bgrt.c
36530+++ b/drivers/acpi/bgrt.c
36531@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
36532 if (!bgrt_image)
36533 return -ENODEV;
36534
36535- bin_attr_image.private = bgrt_image;
36536- bin_attr_image.size = bgrt_image_size;
36537+ pax_open_kernel();
36538+ *(void **)&bin_attr_image.private = bgrt_image;
36539+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
36540+ pax_close_kernel();
36541
36542 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
36543 if (!bgrt_kobj)
36544diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
36545index 9b693d5..8953d54 100644
36546--- a/drivers/acpi/blacklist.c
36547+++ b/drivers/acpi/blacklist.c
36548@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
36549 u32 is_critical_error;
36550 };
36551
36552-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
36553+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
36554
36555 /*
36556 * POLICY: If *anything* doesn't work, put it on the blacklist.
36557@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
36558 return 0;
36559 }
36560
36561-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
36562+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
36563 {
36564 .callback = dmi_disable_osi_vista,
36565 .ident = "Fujitsu Siemens",
36566diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
36567index 8b67bd0..b59593e 100644
36568--- a/drivers/acpi/bus.c
36569+++ b/drivers/acpi/bus.c
36570@@ -67,7 +67,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
36571 }
36572 #endif
36573
36574-static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36575+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
36576 /*
36577 * Invoke DSDT corruption work-around on all Toshiba Satellite.
36578 * https://bugzilla.kernel.org/show_bug.cgi?id=14679
36579@@ -83,7 +83,7 @@ static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36580 {}
36581 };
36582 #else
36583-static struct dmi_system_id dsdt_dmi_table[] __initdata = {
36584+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
36585 {}
36586 };
36587 #endif
36588diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
36589index c68e724..e863008 100644
36590--- a/drivers/acpi/custom_method.c
36591+++ b/drivers/acpi/custom_method.c
36592@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
36593 struct acpi_table_header table;
36594 acpi_status status;
36595
36596+#ifdef CONFIG_GRKERNSEC_KMEM
36597+ return -EPERM;
36598+#endif
36599+
36600 if (!(*ppos)) {
36601 /* parse the table header to get the table length */
36602 if (count <= sizeof(struct acpi_table_header))
36603diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
36604index 735db11..91e07ff 100644
36605--- a/drivers/acpi/device_pm.c
36606+++ b/drivers/acpi/device_pm.c
36607@@ -1025,6 +1025,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
36608
36609 #endif /* CONFIG_PM_SLEEP */
36610
36611+static void acpi_dev_pm_detach(struct device *dev, bool power_off);
36612+
36613 static struct dev_pm_domain acpi_general_pm_domain = {
36614 .ops = {
36615 .runtime_suspend = acpi_subsys_runtime_suspend,
36616@@ -1041,6 +1043,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
36617 .restore_early = acpi_subsys_resume_early,
36618 #endif
36619 },
36620+ .detach = acpi_dev_pm_detach
36621 };
36622
36623 /**
36624@@ -1110,7 +1113,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
36625 acpi_device_wakeup(adev, ACPI_STATE_S0, false);
36626 }
36627
36628- dev->pm_domain->detach = acpi_dev_pm_detach;
36629 return 0;
36630 }
36631 EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
36632diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
36633index a8dd2f7..e15950e 100644
36634--- a/drivers/acpi/ec.c
36635+++ b/drivers/acpi/ec.c
36636@@ -1242,7 +1242,7 @@ static int ec_clear_on_resume(const struct dmi_system_id *id)
36637 return 0;
36638 }
36639
36640-static struct dmi_system_id ec_dmi_table[] __initdata = {
36641+static const struct dmi_system_id ec_dmi_table[] __initconst = {
36642 {
36643 ec_skip_dsdt_scan, "Compal JFL92", {
36644 DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
36645diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
36646index 139d9e4..9a9d799 100644
36647--- a/drivers/acpi/pci_slot.c
36648+++ b/drivers/acpi/pci_slot.c
36649@@ -195,7 +195,7 @@ static int do_sta_before_sun(const struct dmi_system_id *d)
36650 return 0;
36651 }
36652
36653-static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = {
36654+static const struct dmi_system_id acpi_pci_slot_dmi_table[] __initconst = {
36655 /*
36656 * Fujitsu Primequest machines will return 1023 to indicate an
36657 * error if the _SUN method is evaluated on SxFy objects that
36658diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
36659index d9f7158..168e742 100644
36660--- a/drivers/acpi/processor_driver.c
36661+++ b/drivers/acpi/processor_driver.c
36662@@ -159,7 +159,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
36663 return NOTIFY_OK;
36664 }
36665
36666-static struct notifier_block __refdata acpi_cpu_notifier = {
36667+static struct notifier_block __refconst acpi_cpu_notifier = {
36668 .notifier_call = acpi_cpu_soft_notify,
36669 };
36670
36671diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
36672index f98db0b..8309c83 100644
36673--- a/drivers/acpi/processor_idle.c
36674+++ b/drivers/acpi/processor_idle.c
36675@@ -912,7 +912,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
36676 {
36677 int i, count = CPUIDLE_DRIVER_STATE_START;
36678 struct acpi_processor_cx *cx;
36679- struct cpuidle_state *state;
36680+ cpuidle_state_no_const *state;
36681 struct cpuidle_driver *drv = &acpi_idle_driver;
36682
36683 if (!pr->flags.power_setup_done)
36684diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
36685index e5dd808..1eceed1 100644
36686--- a/drivers/acpi/processor_pdc.c
36687+++ b/drivers/acpi/processor_pdc.c
36688@@ -176,7 +176,7 @@ static int __init set_no_mwait(const struct dmi_system_id *id)
36689 return 0;
36690 }
36691
36692-static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
36693+static const struct dmi_system_id processor_idle_dmi_table[] __initconst = {
36694 {
36695 set_no_mwait, "Extensa 5220", {
36696 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
36697diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
36698index 7f251dd..47b262c 100644
36699--- a/drivers/acpi/sleep.c
36700+++ b/drivers/acpi/sleep.c
36701@@ -148,7 +148,7 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
36702 return 0;
36703 }
36704
36705-static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
36706+static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
36707 {
36708 .callback = init_old_suspend_ordering,
36709 .ident = "Abit KN9 (nForce4 variant)",
36710diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
36711index 13e577c..cef11ee 100644
36712--- a/drivers/acpi/sysfs.c
36713+++ b/drivers/acpi/sysfs.c
36714@@ -423,11 +423,11 @@ static u32 num_counters;
36715 static struct attribute **all_attrs;
36716 static u32 acpi_gpe_count;
36717
36718-static struct attribute_group interrupt_stats_attr_group = {
36719+static attribute_group_no_const interrupt_stats_attr_group = {
36720 .name = "interrupts",
36721 };
36722
36723-static struct kobj_attribute *counter_attrs;
36724+static kobj_attribute_no_const *counter_attrs;
36725
36726 static void delete_gpe_attr_array(void)
36727 {
36728diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
36729index d24fa19..782f1e6 100644
36730--- a/drivers/acpi/thermal.c
36731+++ b/drivers/acpi/thermal.c
36732@@ -1209,7 +1209,7 @@ static int thermal_psv(const struct dmi_system_id *d) {
36733 return 0;
36734 }
36735
36736-static struct dmi_system_id thermal_dmi_table[] __initdata = {
36737+static const struct dmi_system_id thermal_dmi_table[] __initconst = {
36738 /*
36739 * Award BIOS on this AOpen makes thermal control almost worthless.
36740 * http://bugzilla.kernel.org/show_bug.cgi?id=8842
36741diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
36742index 26eb70c..4d66ddf 100644
36743--- a/drivers/acpi/video.c
36744+++ b/drivers/acpi/video.c
36745@@ -418,7 +418,7 @@ static int __init video_disable_native_backlight(const struct dmi_system_id *d)
36746 return 0;
36747 }
36748
36749-static struct dmi_system_id video_dmi_table[] __initdata = {
36750+static const struct dmi_system_id video_dmi_table[] __initconst = {
36751 /*
36752 * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
36753 */
36754diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
36755index 287c4ba..6a600bc 100644
36756--- a/drivers/ata/libahci.c
36757+++ b/drivers/ata/libahci.c
36758@@ -1252,7 +1252,7 @@ int ahci_kick_engine(struct ata_port *ap)
36759 }
36760 EXPORT_SYMBOL_GPL(ahci_kick_engine);
36761
36762-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36763+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36764 struct ata_taskfile *tf, int is_cmd, u16 flags,
36765 unsigned long timeout_msec)
36766 {
36767diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
36768index 87b4b7f..d876fbd 100644
36769--- a/drivers/ata/libata-core.c
36770+++ b/drivers/ata/libata-core.c
36771@@ -99,7 +99,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
36772 static void ata_dev_xfermask(struct ata_device *dev);
36773 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
36774
36775-atomic_t ata_print_id = ATOMIC_INIT(0);
36776+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
36777
36778 struct ata_force_param {
36779 const char *name;
36780@@ -4780,7 +4780,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
36781 struct ata_port *ap;
36782 unsigned int tag;
36783
36784- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36785+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36786 ap = qc->ap;
36787
36788 qc->flags = 0;
36789@@ -4797,7 +4797,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
36790 struct ata_port *ap;
36791 struct ata_link *link;
36792
36793- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36794+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36795 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
36796 ap = qc->ap;
36797 link = qc->dev->link;
36798@@ -5901,6 +5901,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36799 return;
36800
36801 spin_lock(&lock);
36802+ pax_open_kernel();
36803
36804 for (cur = ops->inherits; cur; cur = cur->inherits) {
36805 void **inherit = (void **)cur;
36806@@ -5914,8 +5915,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36807 if (IS_ERR(*pp))
36808 *pp = NULL;
36809
36810- ops->inherits = NULL;
36811+ *(struct ata_port_operations **)&ops->inherits = NULL;
36812
36813+ pax_close_kernel();
36814 spin_unlock(&lock);
36815 }
36816
36817@@ -6111,7 +6113,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
36818
36819 /* give ports names and add SCSI hosts */
36820 for (i = 0; i < host->n_ports; i++) {
36821- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
36822+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
36823 host->ports[i]->local_port_no = i + 1;
36824 }
36825
36826diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
36827index b061ba2..fdcd85f 100644
36828--- a/drivers/ata/libata-scsi.c
36829+++ b/drivers/ata/libata-scsi.c
36830@@ -4172,7 +4172,7 @@ int ata_sas_port_init(struct ata_port *ap)
36831
36832 if (rc)
36833 return rc;
36834- ap->print_id = atomic_inc_return(&ata_print_id);
36835+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
36836 return 0;
36837 }
36838 EXPORT_SYMBOL_GPL(ata_sas_port_init);
36839diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
36840index f840ca1..edd6ef3 100644
36841--- a/drivers/ata/libata.h
36842+++ b/drivers/ata/libata.h
36843@@ -53,7 +53,7 @@ enum {
36844 ATA_DNXFER_QUIET = (1 << 31),
36845 };
36846
36847-extern atomic_t ata_print_id;
36848+extern atomic_unchecked_t ata_print_id;
36849 extern int atapi_passthru16;
36850 extern int libata_fua;
36851 extern int libata_noacpi;
36852diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
36853index a9b0c82..207d97d 100644
36854--- a/drivers/ata/pata_arasan_cf.c
36855+++ b/drivers/ata/pata_arasan_cf.c
36856@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
36857 /* Handle platform specific quirks */
36858 if (quirk) {
36859 if (quirk & CF_BROKEN_PIO) {
36860- ap->ops->set_piomode = NULL;
36861+ pax_open_kernel();
36862+ *(void **)&ap->ops->set_piomode = NULL;
36863+ pax_close_kernel();
36864 ap->pio_mask = 0;
36865 }
36866 if (quirk & CF_BROKEN_MWDMA)
36867diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
36868index f9b983a..887b9d8 100644
36869--- a/drivers/atm/adummy.c
36870+++ b/drivers/atm/adummy.c
36871@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
36872 vcc->pop(vcc, skb);
36873 else
36874 dev_kfree_skb_any(skb);
36875- atomic_inc(&vcc->stats->tx);
36876+ atomic_inc_unchecked(&vcc->stats->tx);
36877
36878 return 0;
36879 }
36880diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
36881index f1a9198..f466a4a 100644
36882--- a/drivers/atm/ambassador.c
36883+++ b/drivers/atm/ambassador.c
36884@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
36885 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
36886
36887 // VC layer stats
36888- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36889+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36890
36891 // free the descriptor
36892 kfree (tx_descr);
36893@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36894 dump_skb ("<<<", vc, skb);
36895
36896 // VC layer stats
36897- atomic_inc(&atm_vcc->stats->rx);
36898+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36899 __net_timestamp(skb);
36900 // end of our responsibility
36901 atm_vcc->push (atm_vcc, skb);
36902@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36903 } else {
36904 PRINTK (KERN_INFO, "dropped over-size frame");
36905 // should we count this?
36906- atomic_inc(&atm_vcc->stats->rx_drop);
36907+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36908 }
36909
36910 } else {
36911@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
36912 }
36913
36914 if (check_area (skb->data, skb->len)) {
36915- atomic_inc(&atm_vcc->stats->tx_err);
36916+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
36917 return -ENOMEM; // ?
36918 }
36919
36920diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
36921index 480fa6f..947067c 100644
36922--- a/drivers/atm/atmtcp.c
36923+++ b/drivers/atm/atmtcp.c
36924@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36925 if (vcc->pop) vcc->pop(vcc,skb);
36926 else dev_kfree_skb(skb);
36927 if (dev_data) return 0;
36928- atomic_inc(&vcc->stats->tx_err);
36929+ atomic_inc_unchecked(&vcc->stats->tx_err);
36930 return -ENOLINK;
36931 }
36932 size = skb->len+sizeof(struct atmtcp_hdr);
36933@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36934 if (!new_skb) {
36935 if (vcc->pop) vcc->pop(vcc,skb);
36936 else dev_kfree_skb(skb);
36937- atomic_inc(&vcc->stats->tx_err);
36938+ atomic_inc_unchecked(&vcc->stats->tx_err);
36939 return -ENOBUFS;
36940 }
36941 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
36942@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36943 if (vcc->pop) vcc->pop(vcc,skb);
36944 else dev_kfree_skb(skb);
36945 out_vcc->push(out_vcc,new_skb);
36946- atomic_inc(&vcc->stats->tx);
36947- atomic_inc(&out_vcc->stats->rx);
36948+ atomic_inc_unchecked(&vcc->stats->tx);
36949+ atomic_inc_unchecked(&out_vcc->stats->rx);
36950 return 0;
36951 }
36952
36953@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36954 read_unlock(&vcc_sklist_lock);
36955 if (!out_vcc) {
36956 result = -EUNATCH;
36957- atomic_inc(&vcc->stats->tx_err);
36958+ atomic_inc_unchecked(&vcc->stats->tx_err);
36959 goto done;
36960 }
36961 skb_pull(skb,sizeof(struct atmtcp_hdr));
36962@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36963 __net_timestamp(new_skb);
36964 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
36965 out_vcc->push(out_vcc,new_skb);
36966- atomic_inc(&vcc->stats->tx);
36967- atomic_inc(&out_vcc->stats->rx);
36968+ atomic_inc_unchecked(&vcc->stats->tx);
36969+ atomic_inc_unchecked(&out_vcc->stats->rx);
36970 done:
36971 if (vcc->pop) vcc->pop(vcc,skb);
36972 else dev_kfree_skb(skb);
36973diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
36974index 6339efd..2b441d5 100644
36975--- a/drivers/atm/eni.c
36976+++ b/drivers/atm/eni.c
36977@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
36978 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
36979 vcc->dev->number);
36980 length = 0;
36981- atomic_inc(&vcc->stats->rx_err);
36982+ atomic_inc_unchecked(&vcc->stats->rx_err);
36983 }
36984 else {
36985 length = ATM_CELL_SIZE-1; /* no HEC */
36986@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36987 size);
36988 }
36989 eff = length = 0;
36990- atomic_inc(&vcc->stats->rx_err);
36991+ atomic_inc_unchecked(&vcc->stats->rx_err);
36992 }
36993 else {
36994 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
36995@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36996 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
36997 vcc->dev->number,vcc->vci,length,size << 2,descr);
36998 length = eff = 0;
36999- atomic_inc(&vcc->stats->rx_err);
37000+ atomic_inc_unchecked(&vcc->stats->rx_err);
37001 }
37002 }
37003 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
37004@@ -770,7 +770,7 @@ rx_dequeued++;
37005 vcc->push(vcc,skb);
37006 pushed++;
37007 }
37008- atomic_inc(&vcc->stats->rx);
37009+ atomic_inc_unchecked(&vcc->stats->rx);
37010 }
37011 wake_up(&eni_dev->rx_wait);
37012 }
37013@@ -1230,7 +1230,7 @@ static void dequeue_tx(struct atm_dev *dev)
37014 DMA_TO_DEVICE);
37015 if (vcc->pop) vcc->pop(vcc,skb);
37016 else dev_kfree_skb_irq(skb);
37017- atomic_inc(&vcc->stats->tx);
37018+ atomic_inc_unchecked(&vcc->stats->tx);
37019 wake_up(&eni_dev->tx_wait);
37020 dma_complete++;
37021 }
37022diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
37023index 82f2ae0..f205c02 100644
37024--- a/drivers/atm/firestream.c
37025+++ b/drivers/atm/firestream.c
37026@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
37027 }
37028 }
37029
37030- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
37031+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
37032
37033 fs_dprintk (FS_DEBUG_TXMEM, "i");
37034 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
37035@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
37036 #endif
37037 skb_put (skb, qe->p1 & 0xffff);
37038 ATM_SKB(skb)->vcc = atm_vcc;
37039- atomic_inc(&atm_vcc->stats->rx);
37040+ atomic_inc_unchecked(&atm_vcc->stats->rx);
37041 __net_timestamp(skb);
37042 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
37043 atm_vcc->push (atm_vcc, skb);
37044@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
37045 kfree (pe);
37046 }
37047 if (atm_vcc)
37048- atomic_inc(&atm_vcc->stats->rx_drop);
37049+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
37050 break;
37051 case 0x1f: /* Reassembly abort: no buffers. */
37052 /* Silently increment error counter. */
37053 if (atm_vcc)
37054- atomic_inc(&atm_vcc->stats->rx_drop);
37055+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
37056 break;
37057 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
37058 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
37059diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
37060index 75dde90..4309ead 100644
37061--- a/drivers/atm/fore200e.c
37062+++ b/drivers/atm/fore200e.c
37063@@ -932,9 +932,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
37064 #endif
37065 /* check error condition */
37066 if (*entry->status & STATUS_ERROR)
37067- atomic_inc(&vcc->stats->tx_err);
37068+ atomic_inc_unchecked(&vcc->stats->tx_err);
37069 else
37070- atomic_inc(&vcc->stats->tx);
37071+ atomic_inc_unchecked(&vcc->stats->tx);
37072 }
37073 }
37074
37075@@ -1083,7 +1083,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
37076 if (skb == NULL) {
37077 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
37078
37079- atomic_inc(&vcc->stats->rx_drop);
37080+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37081 return -ENOMEM;
37082 }
37083
37084@@ -1126,14 +1126,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
37085
37086 dev_kfree_skb_any(skb);
37087
37088- atomic_inc(&vcc->stats->rx_drop);
37089+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37090 return -ENOMEM;
37091 }
37092
37093 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
37094
37095 vcc->push(vcc, skb);
37096- atomic_inc(&vcc->stats->rx);
37097+ atomic_inc_unchecked(&vcc->stats->rx);
37098
37099 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
37100
37101@@ -1211,7 +1211,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
37102 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
37103 fore200e->atm_dev->number,
37104 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
37105- atomic_inc(&vcc->stats->rx_err);
37106+ atomic_inc_unchecked(&vcc->stats->rx_err);
37107 }
37108 }
37109
37110@@ -1656,7 +1656,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
37111 goto retry_here;
37112 }
37113
37114- atomic_inc(&vcc->stats->tx_err);
37115+ atomic_inc_unchecked(&vcc->stats->tx_err);
37116
37117 fore200e->tx_sat++;
37118 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
37119diff --git a/drivers/atm/he.c b/drivers/atm/he.c
37120index 93dca2e..c5daa69 100644
37121--- a/drivers/atm/he.c
37122+++ b/drivers/atm/he.c
37123@@ -1692,7 +1692,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37124
37125 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
37126 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
37127- atomic_inc(&vcc->stats->rx_drop);
37128+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37129 goto return_host_buffers;
37130 }
37131
37132@@ -1719,7 +1719,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37133 RBRQ_LEN_ERR(he_dev->rbrq_head)
37134 ? "LEN_ERR" : "",
37135 vcc->vpi, vcc->vci);
37136- atomic_inc(&vcc->stats->rx_err);
37137+ atomic_inc_unchecked(&vcc->stats->rx_err);
37138 goto return_host_buffers;
37139 }
37140
37141@@ -1771,7 +1771,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37142 vcc->push(vcc, skb);
37143 spin_lock(&he_dev->global_lock);
37144
37145- atomic_inc(&vcc->stats->rx);
37146+ atomic_inc_unchecked(&vcc->stats->rx);
37147
37148 return_host_buffers:
37149 ++pdus_assembled;
37150@@ -2097,7 +2097,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
37151 tpd->vcc->pop(tpd->vcc, tpd->skb);
37152 else
37153 dev_kfree_skb_any(tpd->skb);
37154- atomic_inc(&tpd->vcc->stats->tx_err);
37155+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
37156 }
37157 dma_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
37158 return;
37159@@ -2509,7 +2509,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37160 vcc->pop(vcc, skb);
37161 else
37162 dev_kfree_skb_any(skb);
37163- atomic_inc(&vcc->stats->tx_err);
37164+ atomic_inc_unchecked(&vcc->stats->tx_err);
37165 return -EINVAL;
37166 }
37167
37168@@ -2520,7 +2520,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37169 vcc->pop(vcc, skb);
37170 else
37171 dev_kfree_skb_any(skb);
37172- atomic_inc(&vcc->stats->tx_err);
37173+ atomic_inc_unchecked(&vcc->stats->tx_err);
37174 return -EINVAL;
37175 }
37176 #endif
37177@@ -2532,7 +2532,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37178 vcc->pop(vcc, skb);
37179 else
37180 dev_kfree_skb_any(skb);
37181- atomic_inc(&vcc->stats->tx_err);
37182+ atomic_inc_unchecked(&vcc->stats->tx_err);
37183 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37184 return -ENOMEM;
37185 }
37186@@ -2574,7 +2574,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37187 vcc->pop(vcc, skb);
37188 else
37189 dev_kfree_skb_any(skb);
37190- atomic_inc(&vcc->stats->tx_err);
37191+ atomic_inc_unchecked(&vcc->stats->tx_err);
37192 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37193 return -ENOMEM;
37194 }
37195@@ -2605,7 +2605,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37196 __enqueue_tpd(he_dev, tpd, cid);
37197 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37198
37199- atomic_inc(&vcc->stats->tx);
37200+ atomic_inc_unchecked(&vcc->stats->tx);
37201
37202 return 0;
37203 }
37204diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
37205index 527bbd5..96570c8 100644
37206--- a/drivers/atm/horizon.c
37207+++ b/drivers/atm/horizon.c
37208@@ -1018,7 +1018,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
37209 {
37210 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
37211 // VC layer stats
37212- atomic_inc(&vcc->stats->rx);
37213+ atomic_inc_unchecked(&vcc->stats->rx);
37214 __net_timestamp(skb);
37215 // end of our responsibility
37216 vcc->push (vcc, skb);
37217@@ -1170,7 +1170,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
37218 dev->tx_iovec = NULL;
37219
37220 // VC layer stats
37221- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
37222+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
37223
37224 // free the skb
37225 hrz_kfree_skb (skb);
37226diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
37227index 074616b..d6b3d5f 100644
37228--- a/drivers/atm/idt77252.c
37229+++ b/drivers/atm/idt77252.c
37230@@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
37231 else
37232 dev_kfree_skb(skb);
37233
37234- atomic_inc(&vcc->stats->tx);
37235+ atomic_inc_unchecked(&vcc->stats->tx);
37236 }
37237
37238 atomic_dec(&scq->used);
37239@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37240 if ((sb = dev_alloc_skb(64)) == NULL) {
37241 printk("%s: Can't allocate buffers for aal0.\n",
37242 card->name);
37243- atomic_add(i, &vcc->stats->rx_drop);
37244+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37245 break;
37246 }
37247 if (!atm_charge(vcc, sb->truesize)) {
37248 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
37249 card->name);
37250- atomic_add(i - 1, &vcc->stats->rx_drop);
37251+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
37252 dev_kfree_skb(sb);
37253 break;
37254 }
37255@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37256 ATM_SKB(sb)->vcc = vcc;
37257 __net_timestamp(sb);
37258 vcc->push(vcc, sb);
37259- atomic_inc(&vcc->stats->rx);
37260+ atomic_inc_unchecked(&vcc->stats->rx);
37261
37262 cell += ATM_CELL_PAYLOAD;
37263 }
37264@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37265 "(CDC: %08x)\n",
37266 card->name, len, rpp->len, readl(SAR_REG_CDC));
37267 recycle_rx_pool_skb(card, rpp);
37268- atomic_inc(&vcc->stats->rx_err);
37269+ atomic_inc_unchecked(&vcc->stats->rx_err);
37270 return;
37271 }
37272 if (stat & SAR_RSQE_CRC) {
37273 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
37274 recycle_rx_pool_skb(card, rpp);
37275- atomic_inc(&vcc->stats->rx_err);
37276+ atomic_inc_unchecked(&vcc->stats->rx_err);
37277 return;
37278 }
37279 if (skb_queue_len(&rpp->queue) > 1) {
37280@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37281 RXPRINTK("%s: Can't alloc RX skb.\n",
37282 card->name);
37283 recycle_rx_pool_skb(card, rpp);
37284- atomic_inc(&vcc->stats->rx_err);
37285+ atomic_inc_unchecked(&vcc->stats->rx_err);
37286 return;
37287 }
37288 if (!atm_charge(vcc, skb->truesize)) {
37289@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37290 __net_timestamp(skb);
37291
37292 vcc->push(vcc, skb);
37293- atomic_inc(&vcc->stats->rx);
37294+ atomic_inc_unchecked(&vcc->stats->rx);
37295
37296 return;
37297 }
37298@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37299 __net_timestamp(skb);
37300
37301 vcc->push(vcc, skb);
37302- atomic_inc(&vcc->stats->rx);
37303+ atomic_inc_unchecked(&vcc->stats->rx);
37304
37305 if (skb->truesize > SAR_FB_SIZE_3)
37306 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
37307@@ -1302,14 +1302,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
37308 if (vcc->qos.aal != ATM_AAL0) {
37309 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
37310 card->name, vpi, vci);
37311- atomic_inc(&vcc->stats->rx_drop);
37312+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37313 goto drop;
37314 }
37315
37316 if ((sb = dev_alloc_skb(64)) == NULL) {
37317 printk("%s: Can't allocate buffers for AAL0.\n",
37318 card->name);
37319- atomic_inc(&vcc->stats->rx_err);
37320+ atomic_inc_unchecked(&vcc->stats->rx_err);
37321 goto drop;
37322 }
37323
37324@@ -1328,7 +1328,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
37325 ATM_SKB(sb)->vcc = vcc;
37326 __net_timestamp(sb);
37327 vcc->push(vcc, sb);
37328- atomic_inc(&vcc->stats->rx);
37329+ atomic_inc_unchecked(&vcc->stats->rx);
37330
37331 drop:
37332 skb_pull(queue, 64);
37333@@ -1953,13 +1953,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37334
37335 if (vc == NULL) {
37336 printk("%s: NULL connection in send().\n", card->name);
37337- atomic_inc(&vcc->stats->tx_err);
37338+ atomic_inc_unchecked(&vcc->stats->tx_err);
37339 dev_kfree_skb(skb);
37340 return -EINVAL;
37341 }
37342 if (!test_bit(VCF_TX, &vc->flags)) {
37343 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
37344- atomic_inc(&vcc->stats->tx_err);
37345+ atomic_inc_unchecked(&vcc->stats->tx_err);
37346 dev_kfree_skb(skb);
37347 return -EINVAL;
37348 }
37349@@ -1971,14 +1971,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37350 break;
37351 default:
37352 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
37353- atomic_inc(&vcc->stats->tx_err);
37354+ atomic_inc_unchecked(&vcc->stats->tx_err);
37355 dev_kfree_skb(skb);
37356 return -EINVAL;
37357 }
37358
37359 if (skb_shinfo(skb)->nr_frags != 0) {
37360 printk("%s: No scatter-gather yet.\n", card->name);
37361- atomic_inc(&vcc->stats->tx_err);
37362+ atomic_inc_unchecked(&vcc->stats->tx_err);
37363 dev_kfree_skb(skb);
37364 return -EINVAL;
37365 }
37366@@ -1986,7 +1986,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37367
37368 err = queue_skb(card, vc, skb, oam);
37369 if (err) {
37370- atomic_inc(&vcc->stats->tx_err);
37371+ atomic_inc_unchecked(&vcc->stats->tx_err);
37372 dev_kfree_skb(skb);
37373 return err;
37374 }
37375@@ -2009,7 +2009,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
37376 skb = dev_alloc_skb(64);
37377 if (!skb) {
37378 printk("%s: Out of memory in send_oam().\n", card->name);
37379- atomic_inc(&vcc->stats->tx_err);
37380+ atomic_inc_unchecked(&vcc->stats->tx_err);
37381 return -ENOMEM;
37382 }
37383 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
37384diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
37385index 924f8e2..3375a3e 100644
37386--- a/drivers/atm/iphase.c
37387+++ b/drivers/atm/iphase.c
37388@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
37389 status = (u_short) (buf_desc_ptr->desc_mode);
37390 if (status & (RX_CER | RX_PTE | RX_OFL))
37391 {
37392- atomic_inc(&vcc->stats->rx_err);
37393+ atomic_inc_unchecked(&vcc->stats->rx_err);
37394 IF_ERR(printk("IA: bad packet, dropping it");)
37395 if (status & RX_CER) {
37396 IF_ERR(printk(" cause: packet CRC error\n");)
37397@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
37398 len = dma_addr - buf_addr;
37399 if (len > iadev->rx_buf_sz) {
37400 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
37401- atomic_inc(&vcc->stats->rx_err);
37402+ atomic_inc_unchecked(&vcc->stats->rx_err);
37403 goto out_free_desc;
37404 }
37405
37406@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37407 ia_vcc = INPH_IA_VCC(vcc);
37408 if (ia_vcc == NULL)
37409 {
37410- atomic_inc(&vcc->stats->rx_err);
37411+ atomic_inc_unchecked(&vcc->stats->rx_err);
37412 atm_return(vcc, skb->truesize);
37413 dev_kfree_skb_any(skb);
37414 goto INCR_DLE;
37415@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37416 if ((length > iadev->rx_buf_sz) || (length >
37417 (skb->len - sizeof(struct cpcs_trailer))))
37418 {
37419- atomic_inc(&vcc->stats->rx_err);
37420+ atomic_inc_unchecked(&vcc->stats->rx_err);
37421 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
37422 length, skb->len);)
37423 atm_return(vcc, skb->truesize);
37424@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37425
37426 IF_RX(printk("rx_dle_intr: skb push");)
37427 vcc->push(vcc,skb);
37428- atomic_inc(&vcc->stats->rx);
37429+ atomic_inc_unchecked(&vcc->stats->rx);
37430 iadev->rx_pkt_cnt++;
37431 }
37432 INCR_DLE:
37433@@ -2828,15 +2828,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
37434 {
37435 struct k_sonet_stats *stats;
37436 stats = &PRIV(_ia_dev[board])->sonet_stats;
37437- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
37438- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
37439- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
37440- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
37441- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
37442- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
37443- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
37444- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
37445- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
37446+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
37447+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
37448+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
37449+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
37450+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
37451+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
37452+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
37453+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
37454+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
37455 }
37456 ia_cmds.status = 0;
37457 break;
37458@@ -2941,7 +2941,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37459 if ((desc == 0) || (desc > iadev->num_tx_desc))
37460 {
37461 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
37462- atomic_inc(&vcc->stats->tx);
37463+ atomic_inc_unchecked(&vcc->stats->tx);
37464 if (vcc->pop)
37465 vcc->pop(vcc, skb);
37466 else
37467@@ -3046,14 +3046,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37468 ATM_DESC(skb) = vcc->vci;
37469 skb_queue_tail(&iadev->tx_dma_q, skb);
37470
37471- atomic_inc(&vcc->stats->tx);
37472+ atomic_inc_unchecked(&vcc->stats->tx);
37473 iadev->tx_pkt_cnt++;
37474 /* Increment transaction counter */
37475 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
37476
37477 #if 0
37478 /* add flow control logic */
37479- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
37480+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
37481 if (iavcc->vc_desc_cnt > 10) {
37482 vcc->tx_quota = vcc->tx_quota * 3 / 4;
37483 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
37484diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
37485index ce43ae3..969de38 100644
37486--- a/drivers/atm/lanai.c
37487+++ b/drivers/atm/lanai.c
37488@@ -1295,7 +1295,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
37489 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
37490 lanai_endtx(lanai, lvcc);
37491 lanai_free_skb(lvcc->tx.atmvcc, skb);
37492- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
37493+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
37494 }
37495
37496 /* Try to fill the buffer - don't call unless there is backlog */
37497@@ -1418,7 +1418,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
37498 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
37499 __net_timestamp(skb);
37500 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
37501- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
37502+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
37503 out:
37504 lvcc->rx.buf.ptr = end;
37505 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
37506@@ -1659,7 +1659,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37507 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
37508 "vcc %d\n", lanai->number, (unsigned int) s, vci);
37509 lanai->stats.service_rxnotaal5++;
37510- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37511+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37512 return 0;
37513 }
37514 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
37515@@ -1671,7 +1671,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37516 int bytes;
37517 read_unlock(&vcc_sklist_lock);
37518 DPRINTK("got trashed rx pdu on vci %d\n", vci);
37519- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37520+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37521 lvcc->stats.x.aal5.service_trash++;
37522 bytes = (SERVICE_GET_END(s) * 16) -
37523 (((unsigned long) lvcc->rx.buf.ptr) -
37524@@ -1683,7 +1683,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37525 }
37526 if (s & SERVICE_STREAM) {
37527 read_unlock(&vcc_sklist_lock);
37528- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37529+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37530 lvcc->stats.x.aal5.service_stream++;
37531 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
37532 "PDU on VCI %d!\n", lanai->number, vci);
37533@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37534 return 0;
37535 }
37536 DPRINTK("got rx crc error on vci %d\n", vci);
37537- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37538+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37539 lvcc->stats.x.aal5.service_rxcrc++;
37540 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
37541 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
37542diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
37543index b7e1cc0..eb336bfe 100644
37544--- a/drivers/atm/nicstar.c
37545+++ b/drivers/atm/nicstar.c
37546@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37547 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
37548 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
37549 card->index);
37550- atomic_inc(&vcc->stats->tx_err);
37551+ atomic_inc_unchecked(&vcc->stats->tx_err);
37552 dev_kfree_skb_any(skb);
37553 return -EINVAL;
37554 }
37555@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37556 if (!vc->tx) {
37557 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
37558 card->index);
37559- atomic_inc(&vcc->stats->tx_err);
37560+ atomic_inc_unchecked(&vcc->stats->tx_err);
37561 dev_kfree_skb_any(skb);
37562 return -EINVAL;
37563 }
37564@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37565 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
37566 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
37567 card->index);
37568- atomic_inc(&vcc->stats->tx_err);
37569+ atomic_inc_unchecked(&vcc->stats->tx_err);
37570 dev_kfree_skb_any(skb);
37571 return -EINVAL;
37572 }
37573
37574 if (skb_shinfo(skb)->nr_frags != 0) {
37575 printk("nicstar%d: No scatter-gather yet.\n", card->index);
37576- atomic_inc(&vcc->stats->tx_err);
37577+ atomic_inc_unchecked(&vcc->stats->tx_err);
37578 dev_kfree_skb_any(skb);
37579 return -EINVAL;
37580 }
37581@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37582 }
37583
37584 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
37585- atomic_inc(&vcc->stats->tx_err);
37586+ atomic_inc_unchecked(&vcc->stats->tx_err);
37587 dev_kfree_skb_any(skb);
37588 return -EIO;
37589 }
37590- atomic_inc(&vcc->stats->tx);
37591+ atomic_inc_unchecked(&vcc->stats->tx);
37592
37593 return 0;
37594 }
37595@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37596 printk
37597 ("nicstar%d: Can't allocate buffers for aal0.\n",
37598 card->index);
37599- atomic_add(i, &vcc->stats->rx_drop);
37600+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37601 break;
37602 }
37603 if (!atm_charge(vcc, sb->truesize)) {
37604 RXPRINTK
37605 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
37606 card->index);
37607- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37608+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37609 dev_kfree_skb_any(sb);
37610 break;
37611 }
37612@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37613 ATM_SKB(sb)->vcc = vcc;
37614 __net_timestamp(sb);
37615 vcc->push(vcc, sb);
37616- atomic_inc(&vcc->stats->rx);
37617+ atomic_inc_unchecked(&vcc->stats->rx);
37618 cell += ATM_CELL_PAYLOAD;
37619 }
37620
37621@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37622 if (iovb == NULL) {
37623 printk("nicstar%d: Out of iovec buffers.\n",
37624 card->index);
37625- atomic_inc(&vcc->stats->rx_drop);
37626+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37627 recycle_rx_buf(card, skb);
37628 return;
37629 }
37630@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37631 small or large buffer itself. */
37632 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
37633 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
37634- atomic_inc(&vcc->stats->rx_err);
37635+ atomic_inc_unchecked(&vcc->stats->rx_err);
37636 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37637 NS_MAX_IOVECS);
37638 NS_PRV_IOVCNT(iovb) = 0;
37639@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37640 ("nicstar%d: Expected a small buffer, and this is not one.\n",
37641 card->index);
37642 which_list(card, skb);
37643- atomic_inc(&vcc->stats->rx_err);
37644+ atomic_inc_unchecked(&vcc->stats->rx_err);
37645 recycle_rx_buf(card, skb);
37646 vc->rx_iov = NULL;
37647 recycle_iov_buf(card, iovb);
37648@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37649 ("nicstar%d: Expected a large buffer, and this is not one.\n",
37650 card->index);
37651 which_list(card, skb);
37652- atomic_inc(&vcc->stats->rx_err);
37653+ atomic_inc_unchecked(&vcc->stats->rx_err);
37654 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37655 NS_PRV_IOVCNT(iovb));
37656 vc->rx_iov = NULL;
37657@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37658 printk(" - PDU size mismatch.\n");
37659 else
37660 printk(".\n");
37661- atomic_inc(&vcc->stats->rx_err);
37662+ atomic_inc_unchecked(&vcc->stats->rx_err);
37663 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37664 NS_PRV_IOVCNT(iovb));
37665 vc->rx_iov = NULL;
37666@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37667 /* skb points to a small buffer */
37668 if (!atm_charge(vcc, skb->truesize)) {
37669 push_rxbufs(card, skb);
37670- atomic_inc(&vcc->stats->rx_drop);
37671+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37672 } else {
37673 skb_put(skb, len);
37674 dequeue_sm_buf(card, skb);
37675@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37676 ATM_SKB(skb)->vcc = vcc;
37677 __net_timestamp(skb);
37678 vcc->push(vcc, skb);
37679- atomic_inc(&vcc->stats->rx);
37680+ atomic_inc_unchecked(&vcc->stats->rx);
37681 }
37682 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
37683 struct sk_buff *sb;
37684@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37685 if (len <= NS_SMBUFSIZE) {
37686 if (!atm_charge(vcc, sb->truesize)) {
37687 push_rxbufs(card, sb);
37688- atomic_inc(&vcc->stats->rx_drop);
37689+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37690 } else {
37691 skb_put(sb, len);
37692 dequeue_sm_buf(card, sb);
37693@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37694 ATM_SKB(sb)->vcc = vcc;
37695 __net_timestamp(sb);
37696 vcc->push(vcc, sb);
37697- atomic_inc(&vcc->stats->rx);
37698+ atomic_inc_unchecked(&vcc->stats->rx);
37699 }
37700
37701 push_rxbufs(card, skb);
37702@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37703
37704 if (!atm_charge(vcc, skb->truesize)) {
37705 push_rxbufs(card, skb);
37706- atomic_inc(&vcc->stats->rx_drop);
37707+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37708 } else {
37709 dequeue_lg_buf(card, skb);
37710 #ifdef NS_USE_DESTRUCTORS
37711@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37712 ATM_SKB(skb)->vcc = vcc;
37713 __net_timestamp(skb);
37714 vcc->push(vcc, skb);
37715- atomic_inc(&vcc->stats->rx);
37716+ atomic_inc_unchecked(&vcc->stats->rx);
37717 }
37718
37719 push_rxbufs(card, sb);
37720@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37721 printk
37722 ("nicstar%d: Out of huge buffers.\n",
37723 card->index);
37724- atomic_inc(&vcc->stats->rx_drop);
37725+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37726 recycle_iovec_rx_bufs(card,
37727 (struct iovec *)
37728 iovb->data,
37729@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37730 card->hbpool.count++;
37731 } else
37732 dev_kfree_skb_any(hb);
37733- atomic_inc(&vcc->stats->rx_drop);
37734+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37735 } else {
37736 /* Copy the small buffer to the huge buffer */
37737 sb = (struct sk_buff *)iov->iov_base;
37738@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37739 #endif /* NS_USE_DESTRUCTORS */
37740 __net_timestamp(hb);
37741 vcc->push(vcc, hb);
37742- atomic_inc(&vcc->stats->rx);
37743+ atomic_inc_unchecked(&vcc->stats->rx);
37744 }
37745 }
37746
37747diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
37748index 74e18b0..f16afa0 100644
37749--- a/drivers/atm/solos-pci.c
37750+++ b/drivers/atm/solos-pci.c
37751@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
37752 }
37753 atm_charge(vcc, skb->truesize);
37754 vcc->push(vcc, skb);
37755- atomic_inc(&vcc->stats->rx);
37756+ atomic_inc_unchecked(&vcc->stats->rx);
37757 break;
37758
37759 case PKT_STATUS:
37760@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
37761 vcc = SKB_CB(oldskb)->vcc;
37762
37763 if (vcc) {
37764- atomic_inc(&vcc->stats->tx);
37765+ atomic_inc_unchecked(&vcc->stats->tx);
37766 solos_pop(vcc, oldskb);
37767 } else {
37768 dev_kfree_skb_irq(oldskb);
37769diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
37770index 0215934..ce9f5b1 100644
37771--- a/drivers/atm/suni.c
37772+++ b/drivers/atm/suni.c
37773@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
37774
37775
37776 #define ADD_LIMITED(s,v) \
37777- atomic_add((v),&stats->s); \
37778- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
37779+ atomic_add_unchecked((v),&stats->s); \
37780+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
37781
37782
37783 static void suni_hz(unsigned long from_timer)
37784diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
37785index 5120a96..e2572bd 100644
37786--- a/drivers/atm/uPD98402.c
37787+++ b/drivers/atm/uPD98402.c
37788@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
37789 struct sonet_stats tmp;
37790 int error = 0;
37791
37792- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37793+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37794 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
37795 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
37796 if (zero && !error) {
37797@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
37798
37799
37800 #define ADD_LIMITED(s,v) \
37801- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
37802- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
37803- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37804+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
37805+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
37806+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37807
37808
37809 static void stat_event(struct atm_dev *dev)
37810@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
37811 if (reason & uPD98402_INT_PFM) stat_event(dev);
37812 if (reason & uPD98402_INT_PCO) {
37813 (void) GET(PCOCR); /* clear interrupt cause */
37814- atomic_add(GET(HECCT),
37815+ atomic_add_unchecked(GET(HECCT),
37816 &PRIV(dev)->sonet_stats.uncorr_hcs);
37817 }
37818 if ((reason & uPD98402_INT_RFO) &&
37819@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
37820 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
37821 uPD98402_INT_LOS),PIMR); /* enable them */
37822 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
37823- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37824- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
37825- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
37826+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37827+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
37828+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
37829 return 0;
37830 }
37831
37832diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
37833index cecfb94..87009ec 100644
37834--- a/drivers/atm/zatm.c
37835+++ b/drivers/atm/zatm.c
37836@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37837 }
37838 if (!size) {
37839 dev_kfree_skb_irq(skb);
37840- if (vcc) atomic_inc(&vcc->stats->rx_err);
37841+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
37842 continue;
37843 }
37844 if (!atm_charge(vcc,skb->truesize)) {
37845@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37846 skb->len = size;
37847 ATM_SKB(skb)->vcc = vcc;
37848 vcc->push(vcc,skb);
37849- atomic_inc(&vcc->stats->rx);
37850+ atomic_inc_unchecked(&vcc->stats->rx);
37851 }
37852 zout(pos & 0xffff,MTA(mbx));
37853 #if 0 /* probably a stupid idea */
37854@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
37855 skb_queue_head(&zatm_vcc->backlog,skb);
37856 break;
37857 }
37858- atomic_inc(&vcc->stats->tx);
37859+ atomic_inc_unchecked(&vcc->stats->tx);
37860 wake_up(&zatm_vcc->tx_wait);
37861 }
37862
37863diff --git a/drivers/base/bus.c b/drivers/base/bus.c
37864index 79bc203..fa3945b 100644
37865--- a/drivers/base/bus.c
37866+++ b/drivers/base/bus.c
37867@@ -1126,7 +1126,7 @@ int subsys_interface_register(struct subsys_interface *sif)
37868 return -EINVAL;
37869
37870 mutex_lock(&subsys->p->mutex);
37871- list_add_tail(&sif->node, &subsys->p->interfaces);
37872+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
37873 if (sif->add_dev) {
37874 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37875 while ((dev = subsys_dev_iter_next(&iter)))
37876@@ -1151,7 +1151,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
37877 subsys = sif->subsys;
37878
37879 mutex_lock(&subsys->p->mutex);
37880- list_del_init(&sif->node);
37881+ pax_list_del_init((struct list_head *)&sif->node);
37882 if (sif->remove_dev) {
37883 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37884 while ((dev = subsys_dev_iter_next(&iter)))
37885diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
37886index 25798db..15f130e 100644
37887--- a/drivers/base/devtmpfs.c
37888+++ b/drivers/base/devtmpfs.c
37889@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
37890 if (!thread)
37891 return 0;
37892
37893- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
37894+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
37895 if (err)
37896 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
37897 else
37898@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
37899 *err = sys_unshare(CLONE_NEWNS);
37900 if (*err)
37901 goto out;
37902- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
37903+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
37904 if (*err)
37905 goto out;
37906- sys_chdir("/.."); /* will traverse into overmounted root */
37907- sys_chroot(".");
37908+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
37909+ sys_chroot((char __force_user *)".");
37910 complete(&setup_done);
37911 while (1) {
37912 spin_lock(&req_lock);
37913diff --git a/drivers/base/node.c b/drivers/base/node.c
37914index 36fabe43..8cfc112 100644
37915--- a/drivers/base/node.c
37916+++ b/drivers/base/node.c
37917@@ -615,7 +615,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
37918 struct node_attr {
37919 struct device_attribute attr;
37920 enum node_states state;
37921-};
37922+} __do_const;
37923
37924 static ssize_t show_node_state(struct device *dev,
37925 struct device_attribute *attr, char *buf)
37926diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
37927index 45937f8..b9a342e 100644
37928--- a/drivers/base/power/domain.c
37929+++ b/drivers/base/power/domain.c
37930@@ -1698,7 +1698,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
37931 {
37932 struct cpuidle_driver *cpuidle_drv;
37933 struct gpd_cpuidle_data *cpuidle_data;
37934- struct cpuidle_state *idle_state;
37935+ cpuidle_state_no_const *idle_state;
37936 int ret = 0;
37937
37938 if (IS_ERR_OR_NULL(genpd) || state < 0)
37939@@ -1766,7 +1766,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
37940 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
37941 {
37942 struct gpd_cpuidle_data *cpuidle_data;
37943- struct cpuidle_state *idle_state;
37944+ cpuidle_state_no_const *idle_state;
37945 int ret = 0;
37946
37947 if (IS_ERR_OR_NULL(genpd))
37948@@ -2195,7 +2195,10 @@ int genpd_dev_pm_attach(struct device *dev)
37949 return ret;
37950 }
37951
37952- dev->pm_domain->detach = genpd_dev_pm_detach;
37953+ pax_open_kernel();
37954+ *(void **)&dev->pm_domain->detach = genpd_dev_pm_detach;
37955+ pax_close_kernel();
37956+
37957 pm_genpd_poweron(pd);
37958
37959 return 0;
37960diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
37961index d2be3f9..0a3167a 100644
37962--- a/drivers/base/power/sysfs.c
37963+++ b/drivers/base/power/sysfs.c
37964@@ -181,7 +181,7 @@ static ssize_t rtpm_status_show(struct device *dev,
37965 return -EIO;
37966 }
37967 }
37968- return sprintf(buf, p);
37969+ return sprintf(buf, "%s", p);
37970 }
37971
37972 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
37973diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
37974index aab7158..b172db2 100644
37975--- a/drivers/base/power/wakeup.c
37976+++ b/drivers/base/power/wakeup.c
37977@@ -32,14 +32,14 @@ static bool pm_abort_suspend __read_mostly;
37978 * They need to be modified together atomically, so it's better to use one
37979 * atomic variable to hold them both.
37980 */
37981-static atomic_t combined_event_count = ATOMIC_INIT(0);
37982+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
37983
37984 #define IN_PROGRESS_BITS (sizeof(int) * 4)
37985 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
37986
37987 static void split_counters(unsigned int *cnt, unsigned int *inpr)
37988 {
37989- unsigned int comb = atomic_read(&combined_event_count);
37990+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
37991
37992 *cnt = (comb >> IN_PROGRESS_BITS);
37993 *inpr = comb & MAX_IN_PROGRESS;
37994@@ -404,7 +404,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
37995 ws->start_prevent_time = ws->last_time;
37996
37997 /* Increment the counter of events in progress. */
37998- cec = atomic_inc_return(&combined_event_count);
37999+ cec = atomic_inc_return_unchecked(&combined_event_count);
38000
38001 trace_wakeup_source_activate(ws->name, cec);
38002 }
38003@@ -530,7 +530,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
38004 * Increment the counter of registered wakeup events and decrement the
38005 * couter of wakeup events in progress simultaneously.
38006 */
38007- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
38008+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
38009 trace_wakeup_source_deactivate(ws->name, cec);
38010
38011 split_counters(&cnt, &inpr);
38012diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
38013index 8d98a32..61d3165 100644
38014--- a/drivers/base/syscore.c
38015+++ b/drivers/base/syscore.c
38016@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
38017 void register_syscore_ops(struct syscore_ops *ops)
38018 {
38019 mutex_lock(&syscore_ops_lock);
38020- list_add_tail(&ops->node, &syscore_ops_list);
38021+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
38022 mutex_unlock(&syscore_ops_lock);
38023 }
38024 EXPORT_SYMBOL_GPL(register_syscore_ops);
38025@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
38026 void unregister_syscore_ops(struct syscore_ops *ops)
38027 {
38028 mutex_lock(&syscore_ops_lock);
38029- list_del(&ops->node);
38030+ pax_list_del((struct list_head *)&ops->node);
38031 mutex_unlock(&syscore_ops_lock);
38032 }
38033 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
38034diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
38035index ff20f19..018f1da 100644
38036--- a/drivers/block/cciss.c
38037+++ b/drivers/block/cciss.c
38038@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
38039 while (!list_empty(&h->reqQ)) {
38040 c = list_entry(h->reqQ.next, CommandList_struct, list);
38041 /* can't do anything if fifo is full */
38042- if ((h->access.fifo_full(h))) {
38043+ if ((h->access->fifo_full(h))) {
38044 dev_warn(&h->pdev->dev, "fifo full\n");
38045 break;
38046 }
38047@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
38048 h->Qdepth--;
38049
38050 /* Tell the controller execute command */
38051- h->access.submit_command(h, c);
38052+ h->access->submit_command(h, c);
38053
38054 /* Put job onto the completed Q */
38055 addQ(&h->cmpQ, c);
38056@@ -3444,17 +3444,17 @@ startio:
38057
38058 static inline unsigned long get_next_completion(ctlr_info_t *h)
38059 {
38060- return h->access.command_completed(h);
38061+ return h->access->command_completed(h);
38062 }
38063
38064 static inline int interrupt_pending(ctlr_info_t *h)
38065 {
38066- return h->access.intr_pending(h);
38067+ return h->access->intr_pending(h);
38068 }
38069
38070 static inline long interrupt_not_for_us(ctlr_info_t *h)
38071 {
38072- return ((h->access.intr_pending(h) == 0) ||
38073+ return ((h->access->intr_pending(h) == 0) ||
38074 (h->interrupts_enabled == 0));
38075 }
38076
38077@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
38078 u32 a;
38079
38080 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
38081- return h->access.command_completed(h);
38082+ return h->access->command_completed(h);
38083
38084 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
38085 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
38086@@ -4044,7 +4044,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
38087 trans_support & CFGTBL_Trans_use_short_tags);
38088
38089 /* Change the access methods to the performant access methods */
38090- h->access = SA5_performant_access;
38091+ h->access = &SA5_performant_access;
38092 h->transMethod = CFGTBL_Trans_Performant;
38093
38094 return;
38095@@ -4318,7 +4318,7 @@ static int cciss_pci_init(ctlr_info_t *h)
38096 if (prod_index < 0)
38097 return -ENODEV;
38098 h->product_name = products[prod_index].product_name;
38099- h->access = *(products[prod_index].access);
38100+ h->access = products[prod_index].access;
38101
38102 if (cciss_board_disabled(h)) {
38103 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
38104@@ -5050,7 +5050,7 @@ reinit_after_soft_reset:
38105 }
38106
38107 /* make sure the board interrupts are off */
38108- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38109+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38110 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
38111 if (rc)
38112 goto clean2;
38113@@ -5100,7 +5100,7 @@ reinit_after_soft_reset:
38114 * fake ones to scoop up any residual completions.
38115 */
38116 spin_lock_irqsave(&h->lock, flags);
38117- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38118+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38119 spin_unlock_irqrestore(&h->lock, flags);
38120 free_irq(h->intr[h->intr_mode], h);
38121 rc = cciss_request_irq(h, cciss_msix_discard_completions,
38122@@ -5120,9 +5120,9 @@ reinit_after_soft_reset:
38123 dev_info(&h->pdev->dev, "Board READY.\n");
38124 dev_info(&h->pdev->dev,
38125 "Waiting for stale completions to drain.\n");
38126- h->access.set_intr_mask(h, CCISS_INTR_ON);
38127+ h->access->set_intr_mask(h, CCISS_INTR_ON);
38128 msleep(10000);
38129- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38130+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38131
38132 rc = controller_reset_failed(h->cfgtable);
38133 if (rc)
38134@@ -5145,7 +5145,7 @@ reinit_after_soft_reset:
38135 cciss_scsi_setup(h);
38136
38137 /* Turn the interrupts on so we can service requests */
38138- h->access.set_intr_mask(h, CCISS_INTR_ON);
38139+ h->access->set_intr_mask(h, CCISS_INTR_ON);
38140
38141 /* Get the firmware version */
38142 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
38143@@ -5217,7 +5217,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
38144 kfree(flush_buf);
38145 if (return_code != IO_OK)
38146 dev_warn(&h->pdev->dev, "Error flushing cache\n");
38147- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38148+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38149 free_irq(h->intr[h->intr_mode], h);
38150 }
38151
38152diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
38153index 7fda30e..2f27946 100644
38154--- a/drivers/block/cciss.h
38155+++ b/drivers/block/cciss.h
38156@@ -101,7 +101,7 @@ struct ctlr_info
38157 /* information about each logical volume */
38158 drive_info_struct *drv[CISS_MAX_LUN];
38159
38160- struct access_method access;
38161+ struct access_method *access;
38162
38163 /* queue and queue Info */
38164 struct list_head reqQ;
38165@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
38166 }
38167
38168 static struct access_method SA5_access = {
38169- SA5_submit_command,
38170- SA5_intr_mask,
38171- SA5_fifo_full,
38172- SA5_intr_pending,
38173- SA5_completed,
38174+ .submit_command = SA5_submit_command,
38175+ .set_intr_mask = SA5_intr_mask,
38176+ .fifo_full = SA5_fifo_full,
38177+ .intr_pending = SA5_intr_pending,
38178+ .command_completed = SA5_completed,
38179 };
38180
38181 static struct access_method SA5B_access = {
38182- SA5_submit_command,
38183- SA5B_intr_mask,
38184- SA5_fifo_full,
38185- SA5B_intr_pending,
38186- SA5_completed,
38187+ .submit_command = SA5_submit_command,
38188+ .set_intr_mask = SA5B_intr_mask,
38189+ .fifo_full = SA5_fifo_full,
38190+ .intr_pending = SA5B_intr_pending,
38191+ .command_completed = SA5_completed,
38192 };
38193
38194 static struct access_method SA5_performant_access = {
38195- SA5_submit_command,
38196- SA5_performant_intr_mask,
38197- SA5_fifo_full,
38198- SA5_performant_intr_pending,
38199- SA5_performant_completed,
38200+ .submit_command = SA5_submit_command,
38201+ .set_intr_mask = SA5_performant_intr_mask,
38202+ .fifo_full = SA5_fifo_full,
38203+ .intr_pending = SA5_performant_intr_pending,
38204+ .command_completed = SA5_performant_completed,
38205 };
38206
38207 struct board_type {
38208diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
38209index 2b94403..fd6ad1f 100644
38210--- a/drivers/block/cpqarray.c
38211+++ b/drivers/block/cpqarray.c
38212@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38213 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
38214 goto Enomem4;
38215 }
38216- hba[i]->access.set_intr_mask(hba[i], 0);
38217+ hba[i]->access->set_intr_mask(hba[i], 0);
38218 if (request_irq(hba[i]->intr, do_ida_intr,
38219 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
38220 {
38221@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38222 add_timer(&hba[i]->timer);
38223
38224 /* Enable IRQ now that spinlock and rate limit timer are set up */
38225- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38226+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38227
38228 for(j=0; j<NWD; j++) {
38229 struct gendisk *disk = ida_gendisk[i][j];
38230@@ -694,7 +694,7 @@ DBGINFO(
38231 for(i=0; i<NR_PRODUCTS; i++) {
38232 if (board_id == products[i].board_id) {
38233 c->product_name = products[i].product_name;
38234- c->access = *(products[i].access);
38235+ c->access = products[i].access;
38236 break;
38237 }
38238 }
38239@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
38240 hba[ctlr]->intr = intr;
38241 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
38242 hba[ctlr]->product_name = products[j].product_name;
38243- hba[ctlr]->access = *(products[j].access);
38244+ hba[ctlr]->access = products[j].access;
38245 hba[ctlr]->ctlr = ctlr;
38246 hba[ctlr]->board_id = board_id;
38247 hba[ctlr]->pci_dev = NULL; /* not PCI */
38248@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
38249
38250 while((c = h->reqQ) != NULL) {
38251 /* Can't do anything if we're busy */
38252- if (h->access.fifo_full(h) == 0)
38253+ if (h->access->fifo_full(h) == 0)
38254 return;
38255
38256 /* Get the first entry from the request Q */
38257@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
38258 h->Qdepth--;
38259
38260 /* Tell the controller to do our bidding */
38261- h->access.submit_command(h, c);
38262+ h->access->submit_command(h, c);
38263
38264 /* Get onto the completion Q */
38265 addQ(&h->cmpQ, c);
38266@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38267 unsigned long flags;
38268 __u32 a,a1;
38269
38270- istat = h->access.intr_pending(h);
38271+ istat = h->access->intr_pending(h);
38272 /* Is this interrupt for us? */
38273 if (istat == 0)
38274 return IRQ_NONE;
38275@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38276 */
38277 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
38278 if (istat & FIFO_NOT_EMPTY) {
38279- while((a = h->access.command_completed(h))) {
38280+ while((a = h->access->command_completed(h))) {
38281 a1 = a; a &= ~3;
38282 if ((c = h->cmpQ) == NULL)
38283 {
38284@@ -1448,11 +1448,11 @@ static int sendcmd(
38285 /*
38286 * Disable interrupt
38287 */
38288- info_p->access.set_intr_mask(info_p, 0);
38289+ info_p->access->set_intr_mask(info_p, 0);
38290 /* Make sure there is room in the command FIFO */
38291 /* Actually it should be completely empty at this time. */
38292 for (i = 200000; i > 0; i--) {
38293- temp = info_p->access.fifo_full(info_p);
38294+ temp = info_p->access->fifo_full(info_p);
38295 if (temp != 0) {
38296 break;
38297 }
38298@@ -1465,7 +1465,7 @@ DBG(
38299 /*
38300 * Send the cmd
38301 */
38302- info_p->access.submit_command(info_p, c);
38303+ info_p->access->submit_command(info_p, c);
38304 complete = pollcomplete(ctlr);
38305
38306 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
38307@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
38308 * we check the new geometry. Then turn interrupts back on when
38309 * we're done.
38310 */
38311- host->access.set_intr_mask(host, 0);
38312+ host->access->set_intr_mask(host, 0);
38313 getgeometry(ctlr);
38314- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
38315+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
38316
38317 for(i=0; i<NWD; i++) {
38318 struct gendisk *disk = ida_gendisk[ctlr][i];
38319@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
38320 /* Wait (up to 2 seconds) for a command to complete */
38321
38322 for (i = 200000; i > 0; i--) {
38323- done = hba[ctlr]->access.command_completed(hba[ctlr]);
38324+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
38325 if (done == 0) {
38326 udelay(10); /* a short fixed delay */
38327 } else
38328diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
38329index be73e9d..7fbf140 100644
38330--- a/drivers/block/cpqarray.h
38331+++ b/drivers/block/cpqarray.h
38332@@ -99,7 +99,7 @@ struct ctlr_info {
38333 drv_info_t drv[NWD];
38334 struct proc_dir_entry *proc;
38335
38336- struct access_method access;
38337+ struct access_method *access;
38338
38339 cmdlist_t *reqQ;
38340 cmdlist_t *cmpQ;
38341diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
38342index 434c77d..6d3219a 100644
38343--- a/drivers/block/drbd/drbd_bitmap.c
38344+++ b/drivers/block/drbd/drbd_bitmap.c
38345@@ -1036,7 +1036,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
38346 submit_bio(rw, bio);
38347 /* this should not count as user activity and cause the
38348 * resync to throttle -- see drbd_rs_should_slow_down(). */
38349- atomic_add(len >> 9, &device->rs_sect_ev);
38350+ atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
38351 }
38352 }
38353
38354diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
38355index b905e98..0812ed8 100644
38356--- a/drivers/block/drbd/drbd_int.h
38357+++ b/drivers/block/drbd/drbd_int.h
38358@@ -385,7 +385,7 @@ struct drbd_epoch {
38359 struct drbd_connection *connection;
38360 struct list_head list;
38361 unsigned int barrier_nr;
38362- atomic_t epoch_size; /* increased on every request added. */
38363+ atomic_unchecked_t epoch_size; /* increased on every request added. */
38364 atomic_t active; /* increased on every req. added, and dec on every finished. */
38365 unsigned long flags;
38366 };
38367@@ -946,7 +946,7 @@ struct drbd_device {
38368 unsigned int al_tr_number;
38369 int al_tr_cycle;
38370 wait_queue_head_t seq_wait;
38371- atomic_t packet_seq;
38372+ atomic_unchecked_t packet_seq;
38373 unsigned int peer_seq;
38374 spinlock_t peer_seq_lock;
38375 unsigned long comm_bm_set; /* communicated number of set bits. */
38376@@ -955,8 +955,8 @@ struct drbd_device {
38377 struct mutex own_state_mutex;
38378 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
38379 char congestion_reason; /* Why we where congested... */
38380- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38381- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
38382+ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38383+ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
38384 int rs_last_sect_ev; /* counter to compare with */
38385 int rs_last_events; /* counter of read or write "events" (unit sectors)
38386 * on the lower level device when we last looked. */
38387diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
38388index 1fc8342..7e7742b 100644
38389--- a/drivers/block/drbd/drbd_main.c
38390+++ b/drivers/block/drbd/drbd_main.c
38391@@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
38392 p->sector = sector;
38393 p->block_id = block_id;
38394 p->blksize = blksize;
38395- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
38396+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
38397 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
38398 }
38399
38400@@ -1634,7 +1634,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
38401 return -EIO;
38402 p->sector = cpu_to_be64(req->i.sector);
38403 p->block_id = (unsigned long)req;
38404- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
38405+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
38406 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
38407 if (device->state.conn >= C_SYNC_SOURCE &&
38408 device->state.conn <= C_PAUSED_SYNC_T)
38409@@ -1915,8 +1915,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
38410 atomic_set(&device->unacked_cnt, 0);
38411 atomic_set(&device->local_cnt, 0);
38412 atomic_set(&device->pp_in_use_by_net, 0);
38413- atomic_set(&device->rs_sect_in, 0);
38414- atomic_set(&device->rs_sect_ev, 0);
38415+ atomic_set_unchecked(&device->rs_sect_in, 0);
38416+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38417 atomic_set(&device->ap_in_flight, 0);
38418 atomic_set(&device->md_io.in_use, 0);
38419
38420@@ -2684,8 +2684,8 @@ void drbd_destroy_connection(struct kref *kref)
38421 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
38422 struct drbd_resource *resource = connection->resource;
38423
38424- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
38425- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
38426+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
38427+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
38428 kfree(connection->current_epoch);
38429
38430 idr_destroy(&connection->peer_devices);
38431diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
38432index 74df8cf..e41fc24 100644
38433--- a/drivers/block/drbd/drbd_nl.c
38434+++ b/drivers/block/drbd/drbd_nl.c
38435@@ -3637,13 +3637,13 @@ finish:
38436
38437 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
38438 {
38439- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38440+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38441 struct sk_buff *msg;
38442 struct drbd_genlmsghdr *d_out;
38443 unsigned seq;
38444 int err = -ENOMEM;
38445
38446- seq = atomic_inc_return(&drbd_genl_seq);
38447+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
38448 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
38449 if (!msg)
38450 goto failed;
38451diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
38452index cee2035..22f66bd 100644
38453--- a/drivers/block/drbd/drbd_receiver.c
38454+++ b/drivers/block/drbd/drbd_receiver.c
38455@@ -870,7 +870,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
38456 struct drbd_device *device = peer_device->device;
38457 int err;
38458
38459- atomic_set(&device->packet_seq, 0);
38460+ atomic_set_unchecked(&device->packet_seq, 0);
38461 device->peer_seq = 0;
38462
38463 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
38464@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38465 do {
38466 next_epoch = NULL;
38467
38468- epoch_size = atomic_read(&epoch->epoch_size);
38469+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
38470
38471 switch (ev & ~EV_CLEANUP) {
38472 case EV_PUT:
38473@@ -1273,7 +1273,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38474 rv = FE_DESTROYED;
38475 } else {
38476 epoch->flags = 0;
38477- atomic_set(&epoch->epoch_size, 0);
38478+ atomic_set_unchecked(&epoch->epoch_size, 0);
38479 /* atomic_set(&epoch->active, 0); is already zero */
38480 if (rv == FE_STILL_LIVE)
38481 rv = FE_RECYCLED;
38482@@ -1550,7 +1550,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38483 conn_wait_active_ee_empty(connection);
38484 drbd_flush(connection);
38485
38486- if (atomic_read(&connection->current_epoch->epoch_size)) {
38487+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38488 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
38489 if (epoch)
38490 break;
38491@@ -1564,11 +1564,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38492 }
38493
38494 epoch->flags = 0;
38495- atomic_set(&epoch->epoch_size, 0);
38496+ atomic_set_unchecked(&epoch->epoch_size, 0);
38497 atomic_set(&epoch->active, 0);
38498
38499 spin_lock(&connection->epoch_lock);
38500- if (atomic_read(&connection->current_epoch->epoch_size)) {
38501+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38502 list_add(&epoch->list, &connection->current_epoch->list);
38503 connection->current_epoch = epoch;
38504 connection->epochs++;
38505@@ -1802,7 +1802,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
38506 list_add_tail(&peer_req->w.list, &device->sync_ee);
38507 spin_unlock_irq(&device->resource->req_lock);
38508
38509- atomic_add(pi->size >> 9, &device->rs_sect_ev);
38510+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
38511 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
38512 return 0;
38513
38514@@ -1900,7 +1900,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
38515 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38516 }
38517
38518- atomic_add(pi->size >> 9, &device->rs_sect_in);
38519+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
38520
38521 return err;
38522 }
38523@@ -2290,7 +2290,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38524
38525 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
38526 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38527- atomic_inc(&connection->current_epoch->epoch_size);
38528+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
38529 err2 = drbd_drain_block(peer_device, pi->size);
38530 if (!err)
38531 err = err2;
38532@@ -2334,7 +2334,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38533
38534 spin_lock(&connection->epoch_lock);
38535 peer_req->epoch = connection->current_epoch;
38536- atomic_inc(&peer_req->epoch->epoch_size);
38537+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
38538 atomic_inc(&peer_req->epoch->active);
38539 spin_unlock(&connection->epoch_lock);
38540
38541@@ -2479,7 +2479,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
38542
38543 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38544 (int)part_stat_read(&disk->part0, sectors[1]) -
38545- atomic_read(&device->rs_sect_ev);
38546+ atomic_read_unchecked(&device->rs_sect_ev);
38547
38548 if (atomic_read(&device->ap_actlog_cnt)
38549 || curr_events - device->rs_last_events > 64) {
38550@@ -2618,7 +2618,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38551 device->use_csums = true;
38552 } else if (pi->cmd == P_OV_REPLY) {
38553 /* track progress, we may need to throttle */
38554- atomic_add(size >> 9, &device->rs_sect_in);
38555+ atomic_add_unchecked(size >> 9, &device->rs_sect_in);
38556 peer_req->w.cb = w_e_end_ov_reply;
38557 dec_rs_pending(device);
38558 /* drbd_rs_begin_io done when we sent this request,
38559@@ -2691,7 +2691,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38560 goto out_free_e;
38561
38562 submit_for_resync:
38563- atomic_add(size >> 9, &device->rs_sect_ev);
38564+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38565
38566 submit:
38567 update_receiver_timing_details(connection, drbd_submit_peer_request);
38568@@ -4564,7 +4564,7 @@ struct data_cmd {
38569 int expect_payload;
38570 size_t pkt_size;
38571 int (*fn)(struct drbd_connection *, struct packet_info *);
38572-};
38573+} __do_const;
38574
38575 static struct data_cmd drbd_cmd_handler[] = {
38576 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
38577@@ -4678,7 +4678,7 @@ static void conn_disconnect(struct drbd_connection *connection)
38578 if (!list_empty(&connection->current_epoch->list))
38579 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
38580 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
38581- atomic_set(&connection->current_epoch->epoch_size, 0);
38582+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
38583 connection->send.seen_any_write_yet = false;
38584
38585 drbd_info(connection, "Connection closed\n");
38586@@ -5182,7 +5182,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
38587 put_ldev(device);
38588 }
38589 dec_rs_pending(device);
38590- atomic_add(blksize >> 9, &device->rs_sect_in);
38591+ atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
38592
38593 return 0;
38594 }
38595@@ -5470,7 +5470,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
38596 struct asender_cmd {
38597 size_t pkt_size;
38598 int (*fn)(struct drbd_connection *connection, struct packet_info *);
38599-};
38600+} __do_const;
38601
38602 static struct asender_cmd asender_tbl[] = {
38603 [P_PING] = { 0, got_Ping },
38604diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
38605index d0fae55..4469096 100644
38606--- a/drivers/block/drbd/drbd_worker.c
38607+++ b/drivers/block/drbd/drbd_worker.c
38608@@ -408,7 +408,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
38609 list_add_tail(&peer_req->w.list, &device->read_ee);
38610 spin_unlock_irq(&device->resource->req_lock);
38611
38612- atomic_add(size >> 9, &device->rs_sect_ev);
38613+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38614 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
38615 return 0;
38616
38617@@ -553,7 +553,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
38618 unsigned int sect_in; /* Number of sectors that came in since the last turn */
38619 int number, mxb;
38620
38621- sect_in = atomic_xchg(&device->rs_sect_in, 0);
38622+ sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
38623 device->rs_in_flight -= sect_in;
38624
38625 rcu_read_lock();
38626@@ -1595,8 +1595,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
38627 struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
38628 struct fifo_buffer *plan;
38629
38630- atomic_set(&device->rs_sect_in, 0);
38631- atomic_set(&device->rs_sect_ev, 0);
38632+ atomic_set_unchecked(&device->rs_sect_in, 0);
38633+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38634 device->rs_in_flight = 0;
38635 device->rs_last_events =
38636 (int)part_stat_read(&disk->part0, sectors[0]) +
38637diff --git a/drivers/block/loop.c b/drivers/block/loop.c
38638index 773e964..e85af00 100644
38639--- a/drivers/block/loop.c
38640+++ b/drivers/block/loop.c
38641@@ -234,7 +234,7 @@ static int __do_lo_send_write(struct file *file,
38642
38643 file_start_write(file);
38644 set_fs(get_ds());
38645- bw = file->f_op->write(file, buf, len, &pos);
38646+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
38647 set_fs(old_fs);
38648 file_end_write(file);
38649 if (likely(bw == len))
38650diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
38651index 09e628da..7607aaa 100644
38652--- a/drivers/block/pktcdvd.c
38653+++ b/drivers/block/pktcdvd.c
38654@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
38655
38656 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
38657 {
38658- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
38659+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
38660 }
38661
38662 /*
38663@@ -1890,7 +1890,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
38664 return -EROFS;
38665 }
38666 pd->settings.fp = ti.fp;
38667- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
38668+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
38669
38670 if (ti.nwa_v) {
38671 pd->nwa = be32_to_cpu(ti.next_writable);
38672diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
38673index b67066d..515b7f4 100644
38674--- a/drivers/block/rbd.c
38675+++ b/drivers/block/rbd.c
38676@@ -64,7 +64,7 @@
38677 * If the counter is already at its maximum value returns
38678 * -EINVAL without updating it.
38679 */
38680-static int atomic_inc_return_safe(atomic_t *v)
38681+static int __intentional_overflow(-1) atomic_inc_return_safe(atomic_t *v)
38682 {
38683 unsigned int counter;
38684
38685diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
38686index e5565fb..71be10b4 100644
38687--- a/drivers/block/smart1,2.h
38688+++ b/drivers/block/smart1,2.h
38689@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
38690 }
38691
38692 static struct access_method smart4_access = {
38693- smart4_submit_command,
38694- smart4_intr_mask,
38695- smart4_fifo_full,
38696- smart4_intr_pending,
38697- smart4_completed,
38698+ .submit_command = smart4_submit_command,
38699+ .set_intr_mask = smart4_intr_mask,
38700+ .fifo_full = smart4_fifo_full,
38701+ .intr_pending = smart4_intr_pending,
38702+ .command_completed = smart4_completed,
38703 };
38704
38705 /*
38706@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
38707 }
38708
38709 static struct access_method smart2_access = {
38710- smart2_submit_command,
38711- smart2_intr_mask,
38712- smart2_fifo_full,
38713- smart2_intr_pending,
38714- smart2_completed,
38715+ .submit_command = smart2_submit_command,
38716+ .set_intr_mask = smart2_intr_mask,
38717+ .fifo_full = smart2_fifo_full,
38718+ .intr_pending = smart2_intr_pending,
38719+ .command_completed = smart2_completed,
38720 };
38721
38722 /*
38723@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
38724 }
38725
38726 static struct access_method smart2e_access = {
38727- smart2e_submit_command,
38728- smart2e_intr_mask,
38729- smart2e_fifo_full,
38730- smart2e_intr_pending,
38731- smart2e_completed,
38732+ .submit_command = smart2e_submit_command,
38733+ .set_intr_mask = smart2e_intr_mask,
38734+ .fifo_full = smart2e_fifo_full,
38735+ .intr_pending = smart2e_intr_pending,
38736+ .command_completed = smart2e_completed,
38737 };
38738
38739 /*
38740@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
38741 }
38742
38743 static struct access_method smart1_access = {
38744- smart1_submit_command,
38745- smart1_intr_mask,
38746- smart1_fifo_full,
38747- smart1_intr_pending,
38748- smart1_completed,
38749+ .submit_command = smart1_submit_command,
38750+ .set_intr_mask = smart1_intr_mask,
38751+ .fifo_full = smart1_fifo_full,
38752+ .intr_pending = smart1_intr_pending,
38753+ .command_completed = smart1_completed,
38754 };
38755diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
38756index 55c135b..9f8d60c 100644
38757--- a/drivers/bluetooth/btwilink.c
38758+++ b/drivers/bluetooth/btwilink.c
38759@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
38760
38761 static int bt_ti_probe(struct platform_device *pdev)
38762 {
38763- static struct ti_st *hst;
38764+ struct ti_st *hst;
38765 struct hci_dev *hdev;
38766 int err;
38767
38768diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
38769index 5d28a45..a538f90 100644
38770--- a/drivers/cdrom/cdrom.c
38771+++ b/drivers/cdrom/cdrom.c
38772@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
38773 ENSURE(reset, CDC_RESET);
38774 ENSURE(generic_packet, CDC_GENERIC_PACKET);
38775 cdi->mc_flags = 0;
38776- cdo->n_minors = 0;
38777 cdi->options = CDO_USE_FFLAGS;
38778
38779 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
38780@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
38781 else
38782 cdi->cdda_method = CDDA_OLD;
38783
38784- if (!cdo->generic_packet)
38785- cdo->generic_packet = cdrom_dummy_generic_packet;
38786+ if (!cdo->generic_packet) {
38787+ pax_open_kernel();
38788+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
38789+ pax_close_kernel();
38790+ }
38791
38792 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
38793 mutex_lock(&cdrom_mutex);
38794@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
38795 if (cdi->exit)
38796 cdi->exit(cdi);
38797
38798- cdi->ops->n_minors--;
38799 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
38800 }
38801
38802@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
38803 */
38804 nr = nframes;
38805 do {
38806- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38807+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38808 if (cgc.buffer)
38809 break;
38810
38811@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
38812 struct cdrom_device_info *cdi;
38813 int ret;
38814
38815- ret = scnprintf(info + *pos, max_size - *pos, header);
38816+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
38817 if (!ret)
38818 return 1;
38819
38820diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
38821index 584bc31..e64a12c 100644
38822--- a/drivers/cdrom/gdrom.c
38823+++ b/drivers/cdrom/gdrom.c
38824@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
38825 .audio_ioctl = gdrom_audio_ioctl,
38826 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
38827 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
38828- .n_minors = 1,
38829 };
38830
38831 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
38832diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
38833index a4af822..ed58cd1 100644
38834--- a/drivers/char/Kconfig
38835+++ b/drivers/char/Kconfig
38836@@ -17,7 +17,8 @@ config DEVMEM
38837
38838 config DEVKMEM
38839 bool "/dev/kmem virtual device support"
38840- default y
38841+ default n
38842+ depends on !GRKERNSEC_KMEM
38843 help
38844 Say Y here if you want to support the /dev/kmem device. The
38845 /dev/kmem device is rarely used, but can be used for certain
38846@@ -586,6 +587,7 @@ config DEVPORT
38847 bool
38848 depends on !M68K
38849 depends on ISA || PCI
38850+ depends on !GRKERNSEC_KMEM
38851 default y
38852
38853 source "drivers/s390/char/Kconfig"
38854diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
38855index a48e05b..6bac831 100644
38856--- a/drivers/char/agp/compat_ioctl.c
38857+++ b/drivers/char/agp/compat_ioctl.c
38858@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
38859 return -ENOMEM;
38860 }
38861
38862- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
38863+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
38864 sizeof(*usegment) * ureserve.seg_count)) {
38865 kfree(usegment);
38866 kfree(ksegment);
38867diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
38868index 09f17eb..8531d2f 100644
38869--- a/drivers/char/agp/frontend.c
38870+++ b/drivers/char/agp/frontend.c
38871@@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38872 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
38873 return -EFAULT;
38874
38875- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
38876+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
38877 return -EFAULT;
38878
38879 client = agp_find_client_by_pid(reserve.pid);
38880@@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38881 if (segment == NULL)
38882 return -ENOMEM;
38883
38884- if (copy_from_user(segment, (void __user *) reserve.seg_list,
38885+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
38886 sizeof(struct agp_segment) * reserve.seg_count)) {
38887 kfree(segment);
38888 return -EFAULT;
38889diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
38890index 4f94375..413694e 100644
38891--- a/drivers/char/genrtc.c
38892+++ b/drivers/char/genrtc.c
38893@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
38894 switch (cmd) {
38895
38896 case RTC_PLL_GET:
38897+ memset(&pll, 0, sizeof(pll));
38898 if (get_rtc_pll(&pll))
38899 return -EINVAL;
38900 else
38901diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
38902index 5c0baa9..44011b1 100644
38903--- a/drivers/char/hpet.c
38904+++ b/drivers/char/hpet.c
38905@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
38906 }
38907
38908 static int
38909-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
38910+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
38911 struct hpet_info *info)
38912 {
38913 struct hpet_timer __iomem *timer;
38914diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
38915index 24cc4ed..f9807cf 100644
38916--- a/drivers/char/i8k.c
38917+++ b/drivers/char/i8k.c
38918@@ -788,7 +788,7 @@ static const struct i8k_config_data i8k_config_data[] = {
38919 },
38920 };
38921
38922-static struct dmi_system_id i8k_dmi_table[] __initdata = {
38923+static const struct dmi_system_id i8k_dmi_table[] __initconst = {
38924 {
38925 .ident = "Dell Inspiron",
38926 .matches = {
38927diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
38928index 9bb5928..57a7801 100644
38929--- a/drivers/char/ipmi/ipmi_msghandler.c
38930+++ b/drivers/char/ipmi/ipmi_msghandler.c
38931@@ -436,7 +436,7 @@ struct ipmi_smi {
38932 struct proc_dir_entry *proc_dir;
38933 char proc_dir_name[10];
38934
38935- atomic_t stats[IPMI_NUM_STATS];
38936+ atomic_unchecked_t stats[IPMI_NUM_STATS];
38937
38938 /*
38939 * run_to_completion duplicate of smb_info, smi_info
38940@@ -468,9 +468,9 @@ static LIST_HEAD(smi_watchers);
38941 static DEFINE_MUTEX(smi_watchers_mutex);
38942
38943 #define ipmi_inc_stat(intf, stat) \
38944- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
38945+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
38946 #define ipmi_get_stat(intf, stat) \
38947- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
38948+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
38949
38950 static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI",
38951 "ACPI", "SMBIOS", "PCI",
38952@@ -2828,7 +2828,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
38953 INIT_LIST_HEAD(&intf->cmd_rcvrs);
38954 init_waitqueue_head(&intf->waitq);
38955 for (i = 0; i < IPMI_NUM_STATS; i++)
38956- atomic_set(&intf->stats[i], 0);
38957+ atomic_set_unchecked(&intf->stats[i], 0);
38958
38959 intf->proc_dir = NULL;
38960
38961diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
38962index 518585c..6c985cef 100644
38963--- a/drivers/char/ipmi/ipmi_si_intf.c
38964+++ b/drivers/char/ipmi/ipmi_si_intf.c
38965@@ -289,7 +289,7 @@ struct smi_info {
38966 unsigned char slave_addr;
38967
38968 /* Counters and things for the proc filesystem. */
38969- atomic_t stats[SI_NUM_STATS];
38970+ atomic_unchecked_t stats[SI_NUM_STATS];
38971
38972 struct task_struct *thread;
38973
38974@@ -298,9 +298,9 @@ struct smi_info {
38975 };
38976
38977 #define smi_inc_stat(smi, stat) \
38978- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
38979+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
38980 #define smi_get_stat(smi, stat) \
38981- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
38982+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
38983
38984 #define SI_MAX_PARMS 4
38985
38986@@ -3498,7 +3498,7 @@ static int try_smi_init(struct smi_info *new_smi)
38987 atomic_set(&new_smi->req_events, 0);
38988 new_smi->run_to_completion = false;
38989 for (i = 0; i < SI_NUM_STATS; i++)
38990- atomic_set(&new_smi->stats[i], 0);
38991+ atomic_set_unchecked(&new_smi->stats[i], 0);
38992
38993 new_smi->interrupt_disabled = true;
38994 atomic_set(&new_smi->need_watch, 0);
38995diff --git a/drivers/char/mem.c b/drivers/char/mem.c
38996index 297110c..3f69b43 100644
38997--- a/drivers/char/mem.c
38998+++ b/drivers/char/mem.c
38999@@ -18,6 +18,7 @@
39000 #include <linux/raw.h>
39001 #include <linux/tty.h>
39002 #include <linux/capability.h>
39003+#include <linux/security.h>
39004 #include <linux/ptrace.h>
39005 #include <linux/device.h>
39006 #include <linux/highmem.h>
39007@@ -36,6 +37,10 @@
39008
39009 #define DEVPORT_MINOR 4
39010
39011+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39012+extern const struct file_operations grsec_fops;
39013+#endif
39014+
39015 static inline unsigned long size_inside_page(unsigned long start,
39016 unsigned long size)
39017 {
39018@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
39019
39020 while (cursor < to) {
39021 if (!devmem_is_allowed(pfn)) {
39022+#ifdef CONFIG_GRKERNSEC_KMEM
39023+ gr_handle_mem_readwrite(from, to);
39024+#else
39025 printk(KERN_INFO
39026 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
39027 current->comm, from, to);
39028+#endif
39029 return 0;
39030 }
39031 cursor += PAGE_SIZE;
39032@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
39033 }
39034 return 1;
39035 }
39036+#elif defined(CONFIG_GRKERNSEC_KMEM)
39037+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
39038+{
39039+ return 0;
39040+}
39041 #else
39042 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
39043 {
39044@@ -124,7 +138,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
39045 #endif
39046
39047 while (count > 0) {
39048- unsigned long remaining;
39049+ unsigned long remaining = 0;
39050+ char *temp;
39051
39052 sz = size_inside_page(p, count);
39053
39054@@ -140,7 +155,24 @@ static ssize_t read_mem(struct file *file, char __user *buf,
39055 if (!ptr)
39056 return -EFAULT;
39057
39058- remaining = copy_to_user(buf, ptr, sz);
39059+#ifdef CONFIG_PAX_USERCOPY
39060+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
39061+ if (!temp) {
39062+ unxlate_dev_mem_ptr(p, ptr);
39063+ return -ENOMEM;
39064+ }
39065+ remaining = probe_kernel_read(temp, ptr, sz);
39066+#else
39067+ temp = ptr;
39068+#endif
39069+
39070+ if (!remaining)
39071+ remaining = copy_to_user(buf, temp, sz);
39072+
39073+#ifdef CONFIG_PAX_USERCOPY
39074+ kfree(temp);
39075+#endif
39076+
39077 unxlate_dev_mem_ptr(p, ptr);
39078 if (remaining)
39079 return -EFAULT;
39080@@ -380,9 +412,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39081 size_t count, loff_t *ppos)
39082 {
39083 unsigned long p = *ppos;
39084- ssize_t low_count, read, sz;
39085+ ssize_t low_count, read, sz, err = 0;
39086 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
39087- int err = 0;
39088
39089 read = 0;
39090 if (p < (unsigned long) high_memory) {
39091@@ -404,6 +435,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39092 }
39093 #endif
39094 while (low_count > 0) {
39095+ char *temp;
39096+
39097 sz = size_inside_page(p, low_count);
39098
39099 /*
39100@@ -413,7 +446,23 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39101 */
39102 kbuf = xlate_dev_kmem_ptr((void *)p);
39103
39104- if (copy_to_user(buf, kbuf, sz))
39105+#ifdef CONFIG_PAX_USERCOPY
39106+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
39107+ if (!temp)
39108+ return -ENOMEM;
39109+ err = probe_kernel_read(temp, kbuf, sz);
39110+#else
39111+ temp = kbuf;
39112+#endif
39113+
39114+ if (!err)
39115+ err = copy_to_user(buf, temp, sz);
39116+
39117+#ifdef CONFIG_PAX_USERCOPY
39118+ kfree(temp);
39119+#endif
39120+
39121+ if (err)
39122 return -EFAULT;
39123 buf += sz;
39124 p += sz;
39125@@ -804,6 +853,9 @@ static const struct memdev {
39126 #ifdef CONFIG_PRINTK
39127 [11] = { "kmsg", 0644, &kmsg_fops, 0 },
39128 #endif
39129+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39130+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, 0 },
39131+#endif
39132 };
39133
39134 static int memory_open(struct inode *inode, struct file *filp)
39135@@ -865,7 +917,7 @@ static int __init chr_dev_init(void)
39136 continue;
39137
39138 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
39139- NULL, devlist[minor].name);
39140+ NULL, "%s", devlist[minor].name);
39141 }
39142
39143 return tty_init();
39144diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
39145index 9df78e2..01ba9ae 100644
39146--- a/drivers/char/nvram.c
39147+++ b/drivers/char/nvram.c
39148@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
39149
39150 spin_unlock_irq(&rtc_lock);
39151
39152- if (copy_to_user(buf, contents, tmp - contents))
39153+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
39154 return -EFAULT;
39155
39156 *ppos = i;
39157diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
39158index 0ea9986..e7b07e4 100644
39159--- a/drivers/char/pcmcia/synclink_cs.c
39160+++ b/drivers/char/pcmcia/synclink_cs.c
39161@@ -2345,7 +2345,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39162
39163 if (debug_level >= DEBUG_LEVEL_INFO)
39164 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
39165- __FILE__, __LINE__, info->device_name, port->count);
39166+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
39167
39168 if (tty_port_close_start(port, tty, filp) == 0)
39169 goto cleanup;
39170@@ -2363,7 +2363,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39171 cleanup:
39172 if (debug_level >= DEBUG_LEVEL_INFO)
39173 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
39174- tty->driver->name, port->count);
39175+ tty->driver->name, atomic_read(&port->count));
39176 }
39177
39178 /* Wait until the transmitter is empty.
39179@@ -2505,7 +2505,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39180
39181 if (debug_level >= DEBUG_LEVEL_INFO)
39182 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
39183- __FILE__, __LINE__, tty->driver->name, port->count);
39184+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
39185
39186 /* If port is closing, signal caller to try again */
39187 if (port->flags & ASYNC_CLOSING){
39188@@ -2525,11 +2525,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39189 goto cleanup;
39190 }
39191 spin_lock(&port->lock);
39192- port->count++;
39193+ atomic_inc(&port->count);
39194 spin_unlock(&port->lock);
39195 spin_unlock_irqrestore(&info->netlock, flags);
39196
39197- if (port->count == 1) {
39198+ if (atomic_read(&port->count) == 1) {
39199 /* 1st open on this device, init hardware */
39200 retval = startup(info, tty);
39201 if (retval < 0)
39202@@ -3918,7 +3918,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
39203 unsigned short new_crctype;
39204
39205 /* return error if TTY interface open */
39206- if (info->port.count)
39207+ if (atomic_read(&info->port.count))
39208 return -EBUSY;
39209
39210 switch (encoding)
39211@@ -4022,7 +4022,7 @@ static int hdlcdev_open(struct net_device *dev)
39212
39213 /* arbitrate between network and tty opens */
39214 spin_lock_irqsave(&info->netlock, flags);
39215- if (info->port.count != 0 || info->netcount != 0) {
39216+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
39217 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
39218 spin_unlock_irqrestore(&info->netlock, flags);
39219 return -EBUSY;
39220@@ -4112,7 +4112,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39221 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
39222
39223 /* return error if TTY interface open */
39224- if (info->port.count)
39225+ if (atomic_read(&info->port.count))
39226 return -EBUSY;
39227
39228 if (cmd != SIOCWANDEV)
39229diff --git a/drivers/char/random.c b/drivers/char/random.c
39230index 9cd6968..6416f00 100644
39231--- a/drivers/char/random.c
39232+++ b/drivers/char/random.c
39233@@ -289,9 +289,6 @@
39234 /*
39235 * To allow fractional bits to be tracked, the entropy_count field is
39236 * denominated in units of 1/8th bits.
39237- *
39238- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
39239- * credit_entropy_bits() needs to be 64 bits wide.
39240 */
39241 #define ENTROPY_SHIFT 3
39242 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
39243@@ -439,9 +436,9 @@ struct entropy_store {
39244 };
39245
39246 static void push_to_pool(struct work_struct *work);
39247-static __u32 input_pool_data[INPUT_POOL_WORDS];
39248-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
39249-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
39250+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
39251+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39252+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39253
39254 static struct entropy_store input_pool = {
39255 .poolinfo = &poolinfo_table[0],
39256@@ -635,7 +632,7 @@ retry:
39257 /* The +2 corresponds to the /4 in the denominator */
39258
39259 do {
39260- unsigned int anfrac = min(pnfrac, pool_size/2);
39261+ u64 anfrac = min(pnfrac, pool_size/2);
39262 unsigned int add =
39263 ((pool_size - entropy_count)*anfrac*3) >> s;
39264
39265@@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
39266
39267 extract_buf(r, tmp);
39268 i = min_t(int, nbytes, EXTRACT_SIZE);
39269- if (copy_to_user(buf, tmp, i)) {
39270+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
39271 ret = -EFAULT;
39272 break;
39273 }
39274@@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
39275 static int proc_do_uuid(struct ctl_table *table, int write,
39276 void __user *buffer, size_t *lenp, loff_t *ppos)
39277 {
39278- struct ctl_table fake_table;
39279+ ctl_table_no_const fake_table;
39280 unsigned char buf[64], tmp_uuid[16], *uuid;
39281
39282 uuid = table->data;
39283@@ -1620,7 +1617,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
39284 static int proc_do_entropy(struct ctl_table *table, int write,
39285 void __user *buffer, size_t *lenp, loff_t *ppos)
39286 {
39287- struct ctl_table fake_table;
39288+ ctl_table_no_const fake_table;
39289 int entropy_count;
39290
39291 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
39292diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
39293index e496dae..3db53b6 100644
39294--- a/drivers/char/sonypi.c
39295+++ b/drivers/char/sonypi.c
39296@@ -54,6 +54,7 @@
39297
39298 #include <asm/uaccess.h>
39299 #include <asm/io.h>
39300+#include <asm/local.h>
39301
39302 #include <linux/sonypi.h>
39303
39304@@ -490,7 +491,7 @@ static struct sonypi_device {
39305 spinlock_t fifo_lock;
39306 wait_queue_head_t fifo_proc_list;
39307 struct fasync_struct *fifo_async;
39308- int open_count;
39309+ local_t open_count;
39310 int model;
39311 struct input_dev *input_jog_dev;
39312 struct input_dev *input_key_dev;
39313@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
39314 static int sonypi_misc_release(struct inode *inode, struct file *file)
39315 {
39316 mutex_lock(&sonypi_device.lock);
39317- sonypi_device.open_count--;
39318+ local_dec(&sonypi_device.open_count);
39319 mutex_unlock(&sonypi_device.lock);
39320 return 0;
39321 }
39322@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
39323 {
39324 mutex_lock(&sonypi_device.lock);
39325 /* Flush input queue on first open */
39326- if (!sonypi_device.open_count)
39327+ if (!local_read(&sonypi_device.open_count))
39328 kfifo_reset(&sonypi_device.fifo);
39329- sonypi_device.open_count++;
39330+ local_inc(&sonypi_device.open_count);
39331 mutex_unlock(&sonypi_device.lock);
39332
39333 return 0;
39334@@ -1491,7 +1492,7 @@ static struct platform_driver sonypi_driver = {
39335
39336 static struct platform_device *sonypi_platform_device;
39337
39338-static struct dmi_system_id __initdata sonypi_dmi_table[] = {
39339+static const struct dmi_system_id __initconst sonypi_dmi_table[] = {
39340 {
39341 .ident = "Sony Vaio",
39342 .matches = {
39343diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
39344index 565a947..dcdc06e 100644
39345--- a/drivers/char/tpm/tpm_acpi.c
39346+++ b/drivers/char/tpm/tpm_acpi.c
39347@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
39348 virt = acpi_os_map_iomem(start, len);
39349 if (!virt) {
39350 kfree(log->bios_event_log);
39351+ log->bios_event_log = NULL;
39352 printk("%s: ERROR - Unable to map memory\n", __func__);
39353 return -EIO;
39354 }
39355
39356- memcpy_fromio(log->bios_event_log, virt, len);
39357+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
39358
39359 acpi_os_unmap_iomem(virt, len);
39360 return 0;
39361diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
39362index 3a56a13..f8cbd25 100644
39363--- a/drivers/char/tpm/tpm_eventlog.c
39364+++ b/drivers/char/tpm/tpm_eventlog.c
39365@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
39366 event = addr;
39367
39368 if ((event->event_type == 0 && event->event_size == 0) ||
39369- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
39370+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
39371 return NULL;
39372
39373 return addr;
39374@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
39375 return NULL;
39376
39377 if ((event->event_type == 0 && event->event_size == 0) ||
39378- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
39379+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
39380 return NULL;
39381
39382 (*pos)++;
39383@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
39384 int i;
39385
39386 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
39387- seq_putc(m, data[i]);
39388+ if (!seq_putc(m, data[i]))
39389+ return -EFAULT;
39390
39391 return 0;
39392 }
39393diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
39394index 72d7028..1586601 100644
39395--- a/drivers/char/virtio_console.c
39396+++ b/drivers/char/virtio_console.c
39397@@ -685,7 +685,7 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
39398 if (to_user) {
39399 ssize_t ret;
39400
39401- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
39402+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
39403 if (ret)
39404 return -EFAULT;
39405 } else {
39406@@ -789,7 +789,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
39407 if (!port_has_data(port) && !port->host_connected)
39408 return 0;
39409
39410- return fill_readbuf(port, ubuf, count, true);
39411+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
39412 }
39413
39414 static int wait_port_writable(struct port *port, bool nonblock)
39415diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
39416index 956b7e5..b655045 100644
39417--- a/drivers/clk/clk-composite.c
39418+++ b/drivers/clk/clk-composite.c
39419@@ -197,7 +197,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
39420 struct clk *clk;
39421 struct clk_init_data init;
39422 struct clk_composite *composite;
39423- struct clk_ops *clk_composite_ops;
39424+ clk_ops_no_const *clk_composite_ops;
39425
39426 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
39427 if (!composite) {
39428diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
39429index 2e4f6d4..b4cf487 100644
39430--- a/drivers/clk/hisilicon/clk-hi3620.c
39431+++ b/drivers/clk/hisilicon/clk-hi3620.c
39432@@ -38,44 +38,44 @@
39433 #include "clk.h"
39434
39435 /* clock parent list */
39436-static const char *timer0_mux_p[] __initconst = { "osc32k", "timerclk01", };
39437-static const char *timer1_mux_p[] __initconst = { "osc32k", "timerclk01", };
39438-static const char *timer2_mux_p[] __initconst = { "osc32k", "timerclk23", };
39439-static const char *timer3_mux_p[] __initconst = { "osc32k", "timerclk23", };
39440-static const char *timer4_mux_p[] __initconst = { "osc32k", "timerclk45", };
39441-static const char *timer5_mux_p[] __initconst = { "osc32k", "timerclk45", };
39442-static const char *timer6_mux_p[] __initconst = { "osc32k", "timerclk67", };
39443-static const char *timer7_mux_p[] __initconst = { "osc32k", "timerclk67", };
39444-static const char *timer8_mux_p[] __initconst = { "osc32k", "timerclk89", };
39445-static const char *timer9_mux_p[] __initconst = { "osc32k", "timerclk89", };
39446-static const char *uart0_mux_p[] __initconst = { "osc26m", "pclk", };
39447-static const char *uart1_mux_p[] __initconst = { "osc26m", "pclk", };
39448-static const char *uart2_mux_p[] __initconst = { "osc26m", "pclk", };
39449-static const char *uart3_mux_p[] __initconst = { "osc26m", "pclk", };
39450-static const char *uart4_mux_p[] __initconst = { "osc26m", "pclk", };
39451-static const char *spi0_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
39452-static const char *spi1_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
39453-static const char *spi2_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
39454+static const char * const timer0_mux_p[] __initconst = { "osc32k", "timerclk01", };
39455+static const char * const timer1_mux_p[] __initconst = { "osc32k", "timerclk01", };
39456+static const char * const timer2_mux_p[] __initconst = { "osc32k", "timerclk23", };
39457+static const char * const timer3_mux_p[] __initconst = { "osc32k", "timerclk23", };
39458+static const char * const timer4_mux_p[] __initconst = { "osc32k", "timerclk45", };
39459+static const char * const timer5_mux_p[] __initconst = { "osc32k", "timerclk45", };
39460+static const char * const timer6_mux_p[] __initconst = { "osc32k", "timerclk67", };
39461+static const char * const timer7_mux_p[] __initconst = { "osc32k", "timerclk67", };
39462+static const char * const timer8_mux_p[] __initconst = { "osc32k", "timerclk89", };
39463+static const char * const timer9_mux_p[] __initconst = { "osc32k", "timerclk89", };
39464+static const char * const uart0_mux_p[] __initconst = { "osc26m", "pclk", };
39465+static const char * const uart1_mux_p[] __initconst = { "osc26m", "pclk", };
39466+static const char * const uart2_mux_p[] __initconst = { "osc26m", "pclk", };
39467+static const char * const uart3_mux_p[] __initconst = { "osc26m", "pclk", };
39468+static const char * const uart4_mux_p[] __initconst = { "osc26m", "pclk", };
39469+static const char * const spi0_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
39470+static const char * const spi1_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
39471+static const char * const spi2_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
39472 /* share axi parent */
39473-static const char *saxi_mux_p[] __initconst = { "armpll3", "armpll2", };
39474-static const char *pwm0_mux_p[] __initconst = { "osc32k", "osc26m", };
39475-static const char *pwm1_mux_p[] __initconst = { "osc32k", "osc26m", };
39476-static const char *sd_mux_p[] __initconst = { "armpll2", "armpll3", };
39477-static const char *mmc1_mux_p[] __initconst = { "armpll2", "armpll3", };
39478-static const char *mmc1_mux2_p[] __initconst = { "osc26m", "mmc1_div", };
39479-static const char *g2d_mux_p[] __initconst = { "armpll2", "armpll3", };
39480-static const char *venc_mux_p[] __initconst = { "armpll2", "armpll3", };
39481-static const char *vdec_mux_p[] __initconst = { "armpll2", "armpll3", };
39482-static const char *vpp_mux_p[] __initconst = { "armpll2", "armpll3", };
39483-static const char *edc0_mux_p[] __initconst = { "armpll2", "armpll3", };
39484-static const char *ldi0_mux_p[] __initconst = { "armpll2", "armpll4",
39485+static const char * const saxi_mux_p[] __initconst = { "armpll3", "armpll2", };
39486+static const char * const pwm0_mux_p[] __initconst = { "osc32k", "osc26m", };
39487+static const char * const pwm1_mux_p[] __initconst = { "osc32k", "osc26m", };
39488+static const char * const sd_mux_p[] __initconst = { "armpll2", "armpll3", };
39489+static const char * const mmc1_mux_p[] __initconst = { "armpll2", "armpll3", };
39490+static const char * const mmc1_mux2_p[] __initconst = { "osc26m", "mmc1_div", };
39491+static const char * const g2d_mux_p[] __initconst = { "armpll2", "armpll3", };
39492+static const char * const venc_mux_p[] __initconst = { "armpll2", "armpll3", };
39493+static const char * const vdec_mux_p[] __initconst = { "armpll2", "armpll3", };
39494+static const char * const vpp_mux_p[] __initconst = { "armpll2", "armpll3", };
39495+static const char * const edc0_mux_p[] __initconst = { "armpll2", "armpll3", };
39496+static const char * const ldi0_mux_p[] __initconst = { "armpll2", "armpll4",
39497 "armpll3", "armpll5", };
39498-static const char *edc1_mux_p[] __initconst = { "armpll2", "armpll3", };
39499-static const char *ldi1_mux_p[] __initconst = { "armpll2", "armpll4",
39500+static const char * const edc1_mux_p[] __initconst = { "armpll2", "armpll3", };
39501+static const char * const ldi1_mux_p[] __initconst = { "armpll2", "armpll4",
39502 "armpll3", "armpll5", };
39503-static const char *rclk_hsic_p[] __initconst = { "armpll3", "armpll2", };
39504-static const char *mmc2_mux_p[] __initconst = { "armpll2", "armpll3", };
39505-static const char *mmc3_mux_p[] __initconst = { "armpll2", "armpll3", };
39506+static const char * const rclk_hsic_p[] __initconst = { "armpll3", "armpll2", };
39507+static const char * const mmc2_mux_p[] __initconst = { "armpll2", "armpll3", };
39508+static const char * const mmc3_mux_p[] __initconst = { "armpll2", "armpll3", };
39509
39510
39511 /* fixed rate clocks */
39512diff --git a/drivers/clk/hisilicon/clk-hix5hd2.c b/drivers/clk/hisilicon/clk-hix5hd2.c
39513index 3f369c6..05f9ffd 100644
39514--- a/drivers/clk/hisilicon/clk-hix5hd2.c
39515+++ b/drivers/clk/hisilicon/clk-hix5hd2.c
39516@@ -46,15 +46,15 @@ static struct hisi_fixed_rate_clock hix5hd2_fixed_rate_clks[] __initdata = {
39517 { HIX5HD2_FIXED_83M, "83m", NULL, CLK_IS_ROOT, 83333333, },
39518 };
39519
39520-static const char *sfc_mux_p[] __initconst = {
39521+static const char * const sfc_mux_p[] __initconst = {
39522 "24m", "150m", "200m", "100m", "75m", };
39523 static u32 sfc_mux_table[] = {0, 4, 5, 6, 7};
39524
39525-static const char *sdio_mux_p[] __initconst = {
39526+static const char * const sdio_mux_p[] __initconst = {
39527 "75m", "100m", "50m", "15m", };
39528 static u32 sdio_mux_table[] = {0, 1, 2, 3};
39529
39530-static const char *fephy_mux_p[] __initconst = { "25m", "125m"};
39531+static const char * const fephy_mux_p[] __initconst = { "25m", "125m"};
39532 static u32 fephy_mux_table[] = {0, 1};
39533
39534
39535diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
39536index 7eb684c..147c6fc 100644
39537--- a/drivers/clk/rockchip/clk-rk3188.c
39538+++ b/drivers/clk/rockchip/clk-rk3188.c
39539@@ -704,7 +704,7 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
39540 GATE(ACLK_GPS, "aclk_gps", "aclk_peri", 0, RK2928_CLKGATE_CON(8), 13, GFLAGS),
39541 };
39542
39543-static const char *rk3188_critical_clocks[] __initconst = {
39544+static const char * const rk3188_critical_clocks[] __initconst = {
39545 "aclk_cpu",
39546 "aclk_peri",
39547 "hclk_peri",
39548diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
39549index 05d7a0b..4fc131c 100644
39550--- a/drivers/clk/rockchip/clk-rk3288.c
39551+++ b/drivers/clk/rockchip/clk-rk3288.c
39552@@ -771,7 +771,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
39553 GATE(0, "pclk_isp_in", "ext_isp", 0, RK3288_CLKGATE_CON(16), 3, GFLAGS),
39554 };
39555
39556-static const char *rk3288_critical_clocks[] __initconst = {
39557+static const char * const rk3288_critical_clocks[] __initconst = {
39558 "aclk_cpu",
39559 "aclk_peri",
39560 "hclk_peri",
39561diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
39562index 58d2e3b..0c21b0d 100644
39563--- a/drivers/clk/rockchip/clk.h
39564+++ b/drivers/clk/rockchip/clk.h
39565@@ -182,7 +182,7 @@ struct clk *rockchip_clk_register_mmc(const char *name,
39566 const char **parent_names, u8 num_parents,
39567 void __iomem *reg, int shift);
39568
39569-#define PNAME(x) static const char *x[] __initconst
39570+#define PNAME(x) static const char * const x[] __initconst
39571
39572 enum rockchip_clk_branch_type {
39573 branch_composite,
39574diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
39575index e4c7538..99c50cd 100644
39576--- a/drivers/clk/samsung/clk.h
39577+++ b/drivers/clk/samsung/clk.h
39578@@ -260,7 +260,7 @@ struct samsung_gate_clock {
39579 #define GATE_DA(_id, dname, cname, pname, o, b, f, gf, a) \
39580 __GATE(_id, dname, cname, pname, o, b, f, gf, a)
39581
39582-#define PNAME(x) static const char *x[] __initdata
39583+#define PNAME(x) static const char * const x[] __initconst
39584
39585 /**
39586 * struct samsung_clk_reg_dump: register dump of clock controller registers.
39587diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
39588index dd3a78c..386d49c 100644
39589--- a/drivers/clk/socfpga/clk-gate.c
39590+++ b/drivers/clk/socfpga/clk-gate.c
39591@@ -22,6 +22,7 @@
39592 #include <linux/mfd/syscon.h>
39593 #include <linux/of.h>
39594 #include <linux/regmap.h>
39595+#include <asm/pgtable.h>
39596
39597 #include "clk.h"
39598
39599@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
39600 return 0;
39601 }
39602
39603-static struct clk_ops gateclk_ops = {
39604+static clk_ops_no_const gateclk_ops __read_only = {
39605 .prepare = socfpga_clk_prepare,
39606 .recalc_rate = socfpga_clk_recalc_rate,
39607 .get_parent = socfpga_clk_get_parent,
39608@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
39609 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
39610 socfpga_clk->hw.bit_idx = clk_gate[1];
39611
39612- gateclk_ops.enable = clk_gate_ops.enable;
39613- gateclk_ops.disable = clk_gate_ops.disable;
39614+ pax_open_kernel();
39615+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
39616+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
39617+ pax_close_kernel();
39618 }
39619
39620 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
39621diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
39622index de6da95..c98278b 100644
39623--- a/drivers/clk/socfpga/clk-pll.c
39624+++ b/drivers/clk/socfpga/clk-pll.c
39625@@ -21,6 +21,7 @@
39626 #include <linux/io.h>
39627 #include <linux/of.h>
39628 #include <linux/of_address.h>
39629+#include <asm/pgtable.h>
39630
39631 #include "clk.h"
39632
39633@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
39634 CLK_MGR_PLL_CLK_SRC_MASK;
39635 }
39636
39637-static struct clk_ops clk_pll_ops = {
39638+static clk_ops_no_const clk_pll_ops __read_only = {
39639 .recalc_rate = clk_pll_recalc_rate,
39640 .get_parent = clk_pll_get_parent,
39641 };
39642@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
39643 pll_clk->hw.hw.init = &init;
39644
39645 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
39646- clk_pll_ops.enable = clk_gate_ops.enable;
39647- clk_pll_ops.disable = clk_gate_ops.disable;
39648+ pax_open_kernel();
39649+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
39650+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
39651+ pax_close_kernel();
39652
39653 clk = clk_register(NULL, &pll_clk->hw.hw);
39654 if (WARN_ON(IS_ERR(clk))) {
39655diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
39656index 3654f61..81abe81 100644
39657--- a/drivers/clk/ti/composite.c
39658+++ b/drivers/clk/ti/composite.c
39659@@ -69,7 +69,7 @@ struct component_clk {
39660 struct list_head link;
39661 };
39662
39663-static const char * __initconst component_clk_types[] = {
39664+static const char * const __initconst component_clk_types[] = {
39665 "gate", "divider", "mux"
39666 };
39667
39668diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
39669index f870aad..04ba1e4 100644
39670--- a/drivers/clk/zynq/clkc.c
39671+++ b/drivers/clk/zynq/clkc.c
39672@@ -85,22 +85,22 @@ static DEFINE_SPINLOCK(canmioclk_lock);
39673 static DEFINE_SPINLOCK(dbgclk_lock);
39674 static DEFINE_SPINLOCK(aperclk_lock);
39675
39676-static const char *armpll_parents[] __initconst = {"armpll_int", "ps_clk"};
39677-static const char *ddrpll_parents[] __initconst = {"ddrpll_int", "ps_clk"};
39678-static const char *iopll_parents[] __initconst = {"iopll_int", "ps_clk"};
39679-static const char *gem0_mux_parents[] __initconst = {"gem0_div1", "dummy_name"};
39680-static const char *gem1_mux_parents[] __initconst = {"gem1_div1", "dummy_name"};
39681-static const char *can0_mio_mux2_parents[] __initconst = {"can0_gate",
39682+static const char * const armpll_parents[] __initconst = {"armpll_int", "ps_clk"};
39683+static const char * const ddrpll_parents[] __initconst = {"ddrpll_int", "ps_clk"};
39684+static const char * const iopll_parents[] __initconst = {"iopll_int", "ps_clk"};
39685+static const char * gem0_mux_parents[] __initdata = {"gem0_div1", "dummy_name"};
39686+static const char * gem1_mux_parents[] __initdata = {"gem1_div1", "dummy_name"};
39687+static const char * const can0_mio_mux2_parents[] __initconst = {"can0_gate",
39688 "can0_mio_mux"};
39689-static const char *can1_mio_mux2_parents[] __initconst = {"can1_gate",
39690+static const char * const can1_mio_mux2_parents[] __initconst = {"can1_gate",
39691 "can1_mio_mux"};
39692-static const char *dbg_emio_mux_parents[] __initconst = {"dbg_div",
39693+static const char * dbg_emio_mux_parents[] __initdata = {"dbg_div",
39694 "dummy_name"};
39695
39696-static const char *dbgtrc_emio_input_names[] __initconst = {"trace_emio_clk"};
39697-static const char *gem0_emio_input_names[] __initconst = {"gem0_emio_clk"};
39698-static const char *gem1_emio_input_names[] __initconst = {"gem1_emio_clk"};
39699-static const char *swdt_ext_clk_input_names[] __initconst = {"swdt_ext_clk"};
39700+static const char * const dbgtrc_emio_input_names[] __initconst = {"trace_emio_clk"};
39701+static const char * const gem0_emio_input_names[] __initconst = {"gem0_emio_clk"};
39702+static const char * const gem1_emio_input_names[] __initconst = {"gem1_emio_clk"};
39703+static const char * const swdt_ext_clk_input_names[] __initconst = {"swdt_ext_clk"};
39704
39705 static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
39706 const char *clk_name, void __iomem *fclk_ctrl_reg,
39707diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
39708index b0c18ed..1713a80 100644
39709--- a/drivers/cpufreq/acpi-cpufreq.c
39710+++ b/drivers/cpufreq/acpi-cpufreq.c
39711@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39712 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
39713 per_cpu(acfreq_data, cpu) = data;
39714
39715- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
39716- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39717+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
39718+ pax_open_kernel();
39719+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39720+ pax_close_kernel();
39721+ }
39722
39723 result = acpi_processor_register_performance(data->acpi_data, cpu);
39724 if (result)
39725@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39726 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
39727 break;
39728 case ACPI_ADR_SPACE_FIXED_HARDWARE:
39729- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39730+ pax_open_kernel();
39731+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39732+ pax_close_kernel();
39733 break;
39734 default:
39735 break;
39736@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
39737 if (!msrs)
39738 return;
39739
39740- acpi_cpufreq_driver.boost_supported = true;
39741- acpi_cpufreq_driver.boost_enabled = boost_state(0);
39742+ pax_open_kernel();
39743+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
39744+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
39745+ pax_close_kernel();
39746
39747 cpu_notifier_register_begin();
39748
39749diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
39750index bab67db..91af7e3 100644
39751--- a/drivers/cpufreq/cpufreq-dt.c
39752+++ b/drivers/cpufreq/cpufreq-dt.c
39753@@ -392,7 +392,9 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
39754 if (!IS_ERR(cpu_reg))
39755 regulator_put(cpu_reg);
39756
39757- dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39758+ pax_open_kernel();
39759+ *(void **)&dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39760+ pax_close_kernel();
39761
39762 ret = cpufreq_register_driver(&dt_cpufreq_driver);
39763 if (ret)
39764diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
39765index 8ae655c..3141442 100644
39766--- a/drivers/cpufreq/cpufreq.c
39767+++ b/drivers/cpufreq/cpufreq.c
39768@@ -2108,7 +2108,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
39769 }
39770
39771 mutex_lock(&cpufreq_governor_mutex);
39772- list_del(&governor->governor_list);
39773+ pax_list_del(&governor->governor_list);
39774 mutex_unlock(&cpufreq_governor_mutex);
39775 return;
39776 }
39777@@ -2323,7 +2323,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
39778 return NOTIFY_OK;
39779 }
39780
39781-static struct notifier_block __refdata cpufreq_cpu_notifier = {
39782+static struct notifier_block cpufreq_cpu_notifier = {
39783 .notifier_call = cpufreq_cpu_callback,
39784 };
39785
39786@@ -2363,13 +2363,17 @@ int cpufreq_boost_trigger_state(int state)
39787 return 0;
39788
39789 write_lock_irqsave(&cpufreq_driver_lock, flags);
39790- cpufreq_driver->boost_enabled = state;
39791+ pax_open_kernel();
39792+ *(bool *)&cpufreq_driver->boost_enabled = state;
39793+ pax_close_kernel();
39794 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39795
39796 ret = cpufreq_driver->set_boost(state);
39797 if (ret) {
39798 write_lock_irqsave(&cpufreq_driver_lock, flags);
39799- cpufreq_driver->boost_enabled = !state;
39800+ pax_open_kernel();
39801+ *(bool *)&cpufreq_driver->boost_enabled = !state;
39802+ pax_close_kernel();
39803 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39804
39805 pr_err("%s: Cannot %s BOOST\n",
39806@@ -2434,16 +2438,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39807 cpufreq_driver = driver_data;
39808 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39809
39810- if (driver_data->setpolicy)
39811- driver_data->flags |= CPUFREQ_CONST_LOOPS;
39812+ if (driver_data->setpolicy) {
39813+ pax_open_kernel();
39814+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
39815+ pax_close_kernel();
39816+ }
39817
39818 if (cpufreq_boost_supported()) {
39819 /*
39820 * Check if driver provides function to enable boost -
39821 * if not, use cpufreq_boost_set_sw as default
39822 */
39823- if (!cpufreq_driver->set_boost)
39824- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39825+ if (!cpufreq_driver->set_boost) {
39826+ pax_open_kernel();
39827+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39828+ pax_close_kernel();
39829+ }
39830
39831 ret = cpufreq_sysfs_create_file(&boost.attr);
39832 if (ret) {
39833diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
39834index 1b44496..b80ff5e 100644
39835--- a/drivers/cpufreq/cpufreq_governor.c
39836+++ b/drivers/cpufreq/cpufreq_governor.c
39837@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39838 struct dbs_data *dbs_data;
39839 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
39840 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
39841- struct od_ops *od_ops = NULL;
39842+ const struct od_ops *od_ops = NULL;
39843 struct od_dbs_tuners *od_tuners = NULL;
39844 struct cs_dbs_tuners *cs_tuners = NULL;
39845 struct cpu_dbs_common_info *cpu_cdbs;
39846@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39847
39848 if ((cdata->governor == GOV_CONSERVATIVE) &&
39849 (!policy->governor->initialized)) {
39850- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39851+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39852
39853 cpufreq_register_notifier(cs_ops->notifier_block,
39854 CPUFREQ_TRANSITION_NOTIFIER);
39855@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39856
39857 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
39858 (policy->governor->initialized == 1)) {
39859- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39860+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39861
39862 cpufreq_unregister_notifier(cs_ops->notifier_block,
39863 CPUFREQ_TRANSITION_NOTIFIER);
39864diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
39865index cc401d1..8197340 100644
39866--- a/drivers/cpufreq/cpufreq_governor.h
39867+++ b/drivers/cpufreq/cpufreq_governor.h
39868@@ -212,7 +212,7 @@ struct common_dbs_data {
39869 void (*exit)(struct dbs_data *dbs_data);
39870
39871 /* Governor specific ops, see below */
39872- void *gov_ops;
39873+ const void *gov_ops;
39874 };
39875
39876 /* Governor Per policy data */
39877@@ -232,7 +232,7 @@ struct od_ops {
39878 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
39879 unsigned int freq_next, unsigned int relation);
39880 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
39881-};
39882+} __no_const;
39883
39884 struct cs_ops {
39885 struct notifier_block *notifier_block;
39886diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
39887index ad3f38f..8f086cd 100644
39888--- a/drivers/cpufreq/cpufreq_ondemand.c
39889+++ b/drivers/cpufreq/cpufreq_ondemand.c
39890@@ -524,7 +524,7 @@ static void od_exit(struct dbs_data *dbs_data)
39891
39892 define_get_cpu_dbs_routines(od_cpu_dbs_info);
39893
39894-static struct od_ops od_ops = {
39895+static struct od_ops od_ops __read_only = {
39896 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
39897 .powersave_bias_target = generic_powersave_bias_target,
39898 .freq_increase = dbs_freq_increase,
39899@@ -579,14 +579,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
39900 (struct cpufreq_policy *, unsigned int, unsigned int),
39901 unsigned int powersave_bias)
39902 {
39903- od_ops.powersave_bias_target = f;
39904+ pax_open_kernel();
39905+ *(void **)&od_ops.powersave_bias_target = f;
39906+ pax_close_kernel();
39907 od_set_powersave_bias(powersave_bias);
39908 }
39909 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
39910
39911 void od_unregister_powersave_bias_handler(void)
39912 {
39913- od_ops.powersave_bias_target = generic_powersave_bias_target;
39914+ pax_open_kernel();
39915+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
39916+ pax_close_kernel();
39917 od_set_powersave_bias(0);
39918 }
39919 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
39920diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
39921index 2c867a6..2d7d333 100644
39922--- a/drivers/cpufreq/intel_pstate.c
39923+++ b/drivers/cpufreq/intel_pstate.c
39924@@ -133,10 +133,10 @@ struct pstate_funcs {
39925 struct cpu_defaults {
39926 struct pstate_adjust_policy pid_policy;
39927 struct pstate_funcs funcs;
39928-};
39929+} __do_const;
39930
39931 static struct pstate_adjust_policy pid_params;
39932-static struct pstate_funcs pstate_funcs;
39933+static struct pstate_funcs *pstate_funcs;
39934 static int hwp_active;
39935
39936 struct perf_limits {
39937@@ -690,18 +690,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
39938
39939 cpu->pstate.current_pstate = pstate;
39940
39941- pstate_funcs.set(cpu, pstate);
39942+ pstate_funcs->set(cpu, pstate);
39943 }
39944
39945 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
39946 {
39947- cpu->pstate.min_pstate = pstate_funcs.get_min();
39948- cpu->pstate.max_pstate = pstate_funcs.get_max();
39949- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
39950- cpu->pstate.scaling = pstate_funcs.get_scaling();
39951+ cpu->pstate.min_pstate = pstate_funcs->get_min();
39952+ cpu->pstate.max_pstate = pstate_funcs->get_max();
39953+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
39954+ cpu->pstate.scaling = pstate_funcs->get_scaling();
39955
39956- if (pstate_funcs.get_vid)
39957- pstate_funcs.get_vid(cpu);
39958+ if (pstate_funcs->get_vid)
39959+ pstate_funcs->get_vid(cpu);
39960 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
39961 }
39962
39963@@ -1030,9 +1030,9 @@ static int intel_pstate_msrs_not_valid(void)
39964 rdmsrl(MSR_IA32_APERF, aperf);
39965 rdmsrl(MSR_IA32_MPERF, mperf);
39966
39967- if (!pstate_funcs.get_max() ||
39968- !pstate_funcs.get_min() ||
39969- !pstate_funcs.get_turbo())
39970+ if (!pstate_funcs->get_max() ||
39971+ !pstate_funcs->get_min() ||
39972+ !pstate_funcs->get_turbo())
39973 return -ENODEV;
39974
39975 rdmsrl(MSR_IA32_APERF, tmp);
39976@@ -1046,7 +1046,7 @@ static int intel_pstate_msrs_not_valid(void)
39977 return 0;
39978 }
39979
39980-static void copy_pid_params(struct pstate_adjust_policy *policy)
39981+static void copy_pid_params(const struct pstate_adjust_policy *policy)
39982 {
39983 pid_params.sample_rate_ms = policy->sample_rate_ms;
39984 pid_params.p_gain_pct = policy->p_gain_pct;
39985@@ -1058,12 +1058,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
39986
39987 static void copy_cpu_funcs(struct pstate_funcs *funcs)
39988 {
39989- pstate_funcs.get_max = funcs->get_max;
39990- pstate_funcs.get_min = funcs->get_min;
39991- pstate_funcs.get_turbo = funcs->get_turbo;
39992- pstate_funcs.get_scaling = funcs->get_scaling;
39993- pstate_funcs.set = funcs->set;
39994- pstate_funcs.get_vid = funcs->get_vid;
39995+ pstate_funcs = funcs;
39996 }
39997
39998 #if IS_ENABLED(CONFIG_ACPI)
39999diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
40000index 529cfd9..0e28fff 100644
40001--- a/drivers/cpufreq/p4-clockmod.c
40002+++ b/drivers/cpufreq/p4-clockmod.c
40003@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
40004 case 0x0F: /* Core Duo */
40005 case 0x16: /* Celeron Core */
40006 case 0x1C: /* Atom */
40007- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
40008+ pax_open_kernel();
40009+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
40010+ pax_close_kernel();
40011 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
40012 case 0x0D: /* Pentium M (Dothan) */
40013- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
40014+ pax_open_kernel();
40015+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
40016+ pax_close_kernel();
40017 /* fall through */
40018 case 0x09: /* Pentium M (Banias) */
40019 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
40020@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
40021
40022 /* on P-4s, the TSC runs with constant frequency independent whether
40023 * throttling is active or not. */
40024- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
40025+ pax_open_kernel();
40026+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
40027+ pax_close_kernel();
40028
40029 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
40030 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
40031diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
40032index 9bb42ba..b01b4a2 100644
40033--- a/drivers/cpufreq/sparc-us3-cpufreq.c
40034+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
40035@@ -18,14 +18,12 @@
40036 #include <asm/head.h>
40037 #include <asm/timer.h>
40038
40039-static struct cpufreq_driver *cpufreq_us3_driver;
40040-
40041 struct us3_freq_percpu_info {
40042 struct cpufreq_frequency_table table[4];
40043 };
40044
40045 /* Indexed by cpu number. */
40046-static struct us3_freq_percpu_info *us3_freq_table;
40047+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
40048
40049 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
40050 * in the Safari config register.
40051@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
40052
40053 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
40054 {
40055- if (cpufreq_us3_driver)
40056- us3_freq_target(policy, 0);
40057+ us3_freq_target(policy, 0);
40058
40059 return 0;
40060 }
40061
40062+static int __init us3_freq_init(void);
40063+static void __exit us3_freq_exit(void);
40064+
40065+static struct cpufreq_driver cpufreq_us3_driver = {
40066+ .init = us3_freq_cpu_init,
40067+ .verify = cpufreq_generic_frequency_table_verify,
40068+ .target_index = us3_freq_target,
40069+ .get = us3_freq_get,
40070+ .exit = us3_freq_cpu_exit,
40071+ .name = "UltraSPARC-III",
40072+
40073+};
40074+
40075 static int __init us3_freq_init(void)
40076 {
40077 unsigned long manuf, impl, ver;
40078- int ret;
40079
40080 if (tlb_type != cheetah && tlb_type != cheetah_plus)
40081 return -ENODEV;
40082@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
40083 (impl == CHEETAH_IMPL ||
40084 impl == CHEETAH_PLUS_IMPL ||
40085 impl == JAGUAR_IMPL ||
40086- impl == PANTHER_IMPL)) {
40087- struct cpufreq_driver *driver;
40088-
40089- ret = -ENOMEM;
40090- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
40091- if (!driver)
40092- goto err_out;
40093-
40094- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
40095- GFP_KERNEL);
40096- if (!us3_freq_table)
40097- goto err_out;
40098-
40099- driver->init = us3_freq_cpu_init;
40100- driver->verify = cpufreq_generic_frequency_table_verify;
40101- driver->target_index = us3_freq_target;
40102- driver->get = us3_freq_get;
40103- driver->exit = us3_freq_cpu_exit;
40104- strcpy(driver->name, "UltraSPARC-III");
40105-
40106- cpufreq_us3_driver = driver;
40107- ret = cpufreq_register_driver(driver);
40108- if (ret)
40109- goto err_out;
40110-
40111- return 0;
40112-
40113-err_out:
40114- if (driver) {
40115- kfree(driver);
40116- cpufreq_us3_driver = NULL;
40117- }
40118- kfree(us3_freq_table);
40119- us3_freq_table = NULL;
40120- return ret;
40121- }
40122+ impl == PANTHER_IMPL))
40123+ return cpufreq_register_driver(&cpufreq_us3_driver);
40124
40125 return -ENODEV;
40126 }
40127
40128 static void __exit us3_freq_exit(void)
40129 {
40130- if (cpufreq_us3_driver) {
40131- cpufreq_unregister_driver(cpufreq_us3_driver);
40132- kfree(cpufreq_us3_driver);
40133- cpufreq_us3_driver = NULL;
40134- kfree(us3_freq_table);
40135- us3_freq_table = NULL;
40136- }
40137+ cpufreq_unregister_driver(&cpufreq_us3_driver);
40138 }
40139
40140 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
40141diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
40142index 7d4a315..21bb886 100644
40143--- a/drivers/cpufreq/speedstep-centrino.c
40144+++ b/drivers/cpufreq/speedstep-centrino.c
40145@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
40146 !cpu_has(cpu, X86_FEATURE_EST))
40147 return -ENODEV;
40148
40149- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
40150- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
40151+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
40152+ pax_open_kernel();
40153+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
40154+ pax_close_kernel();
40155+ }
40156
40157 if (policy->cpu != 0)
40158 return -ENODEV;
40159diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
40160index 2697e87..c32476c 100644
40161--- a/drivers/cpuidle/driver.c
40162+++ b/drivers/cpuidle/driver.c
40163@@ -194,7 +194,7 @@ static int poll_idle(struct cpuidle_device *dev,
40164
40165 static void poll_idle_init(struct cpuidle_driver *drv)
40166 {
40167- struct cpuidle_state *state = &drv->states[0];
40168+ cpuidle_state_no_const *state = &drv->states[0];
40169
40170 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
40171 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
40172diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
40173index fb9f511..213e6cc 100644
40174--- a/drivers/cpuidle/governor.c
40175+++ b/drivers/cpuidle/governor.c
40176@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
40177 mutex_lock(&cpuidle_lock);
40178 if (__cpuidle_find_governor(gov->name) == NULL) {
40179 ret = 0;
40180- list_add_tail(&gov->governor_list, &cpuidle_governors);
40181+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
40182 if (!cpuidle_curr_governor ||
40183 cpuidle_curr_governor->rating < gov->rating)
40184 cpuidle_switch_governor(gov);
40185diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
40186index 832a2c3..1794080 100644
40187--- a/drivers/cpuidle/sysfs.c
40188+++ b/drivers/cpuidle/sysfs.c
40189@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
40190 NULL
40191 };
40192
40193-static struct attribute_group cpuidle_attr_group = {
40194+static attribute_group_no_const cpuidle_attr_group = {
40195 .attrs = cpuidle_default_attrs,
40196 .name = "cpuidle",
40197 };
40198diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
40199index 8d2a772..33826c9 100644
40200--- a/drivers/crypto/hifn_795x.c
40201+++ b/drivers/crypto/hifn_795x.c
40202@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
40203 MODULE_PARM_DESC(hifn_pll_ref,
40204 "PLL reference clock (pci[freq] or ext[freq], default ext)");
40205
40206-static atomic_t hifn_dev_number;
40207+static atomic_unchecked_t hifn_dev_number;
40208
40209 #define ACRYPTO_OP_DECRYPT 0
40210 #define ACRYPTO_OP_ENCRYPT 1
40211@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
40212 goto err_out_disable_pci_device;
40213
40214 snprintf(name, sizeof(name), "hifn%d",
40215- atomic_inc_return(&hifn_dev_number)-1);
40216+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
40217
40218 err = pci_request_regions(pdev, name);
40219 if (err)
40220diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
40221index 30b538d8..1610d75 100644
40222--- a/drivers/devfreq/devfreq.c
40223+++ b/drivers/devfreq/devfreq.c
40224@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
40225 goto err_out;
40226 }
40227
40228- list_add(&governor->node, &devfreq_governor_list);
40229+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
40230
40231 list_for_each_entry(devfreq, &devfreq_list, node) {
40232 int ret = 0;
40233@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
40234 }
40235 }
40236
40237- list_del(&governor->node);
40238+ pax_list_del((struct list_head *)&governor->node);
40239 err_out:
40240 mutex_unlock(&devfreq_list_lock);
40241
40242diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
40243index 8ee383d..736b5de 100644
40244--- a/drivers/dma/sh/shdma-base.c
40245+++ b/drivers/dma/sh/shdma-base.c
40246@@ -228,8 +228,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
40247 schan->slave_id = -EINVAL;
40248 }
40249
40250- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
40251- sdev->desc_size, GFP_KERNEL);
40252+ schan->desc = kcalloc(sdev->desc_size,
40253+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
40254 if (!schan->desc) {
40255 ret = -ENOMEM;
40256 goto edescalloc;
40257diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
40258index 9f1d4c7..fceff78 100644
40259--- a/drivers/dma/sh/shdmac.c
40260+++ b/drivers/dma/sh/shdmac.c
40261@@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
40262 return ret;
40263 }
40264
40265-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
40266+static struct notifier_block sh_dmae_nmi_notifier = {
40267 .notifier_call = sh_dmae_nmi_handler,
40268
40269 /* Run before NMI debug handler and KGDB */
40270diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
40271index 592af5f..bb1d583 100644
40272--- a/drivers/edac/edac_device.c
40273+++ b/drivers/edac/edac_device.c
40274@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
40275 */
40276 int edac_device_alloc_index(void)
40277 {
40278- static atomic_t device_indexes = ATOMIC_INIT(0);
40279+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
40280
40281- return atomic_inc_return(&device_indexes) - 1;
40282+ return atomic_inc_return_unchecked(&device_indexes) - 1;
40283 }
40284 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
40285
40286diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
40287index c84eecb..4d7381d 100644
40288--- a/drivers/edac/edac_mc_sysfs.c
40289+++ b/drivers/edac/edac_mc_sysfs.c
40290@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
40291 struct dev_ch_attribute {
40292 struct device_attribute attr;
40293 int channel;
40294-};
40295+} __do_const;
40296
40297 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
40298 static struct dev_ch_attribute dev_attr_legacy_##_name = \
40299@@ -1009,15 +1009,17 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
40300 }
40301
40302 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
40303+ pax_open_kernel();
40304 if (mci->get_sdram_scrub_rate) {
40305- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
40306- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
40307+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
40308+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
40309 }
40310
40311 if (mci->set_sdram_scrub_rate) {
40312- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
40313- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
40314+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
40315+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
40316 }
40317+ pax_close_kernel();
40318
40319 err = device_create_file(&mci->dev, &dev_attr_sdram_scrub_rate);
40320 if (err) {
40321diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
40322index 2cf44b4d..6dd2dc7 100644
40323--- a/drivers/edac/edac_pci.c
40324+++ b/drivers/edac/edac_pci.c
40325@@ -29,7 +29,7 @@
40326
40327 static DEFINE_MUTEX(edac_pci_ctls_mutex);
40328 static LIST_HEAD(edac_pci_list);
40329-static atomic_t pci_indexes = ATOMIC_INIT(0);
40330+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
40331
40332 /*
40333 * edac_pci_alloc_ctl_info
40334@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
40335 */
40336 int edac_pci_alloc_index(void)
40337 {
40338- return atomic_inc_return(&pci_indexes) - 1;
40339+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
40340 }
40341 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
40342
40343diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
40344index 24d877f..4e30133 100644
40345--- a/drivers/edac/edac_pci_sysfs.c
40346+++ b/drivers/edac/edac_pci_sysfs.c
40347@@ -23,8 +23,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
40348 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
40349 static int edac_pci_poll_msec = 1000; /* one second workq period */
40350
40351-static atomic_t pci_parity_count = ATOMIC_INIT(0);
40352-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
40353+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
40354+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
40355
40356 static struct kobject *edac_pci_top_main_kobj;
40357 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
40358@@ -232,7 +232,7 @@ struct edac_pci_dev_attribute {
40359 void *value;
40360 ssize_t(*show) (void *, char *);
40361 ssize_t(*store) (void *, const char *, size_t);
40362-};
40363+} __do_const;
40364
40365 /* Set of show/store abstract level functions for PCI Parity object */
40366 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
40367@@ -576,7 +576,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40368 edac_printk(KERN_CRIT, EDAC_PCI,
40369 "Signaled System Error on %s\n",
40370 pci_name(dev));
40371- atomic_inc(&pci_nonparity_count);
40372+ atomic_inc_unchecked(&pci_nonparity_count);
40373 }
40374
40375 if (status & (PCI_STATUS_PARITY)) {
40376@@ -584,7 +584,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40377 "Master Data Parity Error on %s\n",
40378 pci_name(dev));
40379
40380- atomic_inc(&pci_parity_count);
40381+ atomic_inc_unchecked(&pci_parity_count);
40382 }
40383
40384 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40385@@ -592,7 +592,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40386 "Detected Parity Error on %s\n",
40387 pci_name(dev));
40388
40389- atomic_inc(&pci_parity_count);
40390+ atomic_inc_unchecked(&pci_parity_count);
40391 }
40392 }
40393
40394@@ -615,7 +615,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40395 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
40396 "Signaled System Error on %s\n",
40397 pci_name(dev));
40398- atomic_inc(&pci_nonparity_count);
40399+ atomic_inc_unchecked(&pci_nonparity_count);
40400 }
40401
40402 if (status & (PCI_STATUS_PARITY)) {
40403@@ -623,7 +623,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40404 "Master Data Parity Error on "
40405 "%s\n", pci_name(dev));
40406
40407- atomic_inc(&pci_parity_count);
40408+ atomic_inc_unchecked(&pci_parity_count);
40409 }
40410
40411 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40412@@ -631,7 +631,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40413 "Detected Parity Error on %s\n",
40414 pci_name(dev));
40415
40416- atomic_inc(&pci_parity_count);
40417+ atomic_inc_unchecked(&pci_parity_count);
40418 }
40419 }
40420 }
40421@@ -669,7 +669,7 @@ void edac_pci_do_parity_check(void)
40422 if (!check_pci_errors)
40423 return;
40424
40425- before_count = atomic_read(&pci_parity_count);
40426+ before_count = atomic_read_unchecked(&pci_parity_count);
40427
40428 /* scan all PCI devices looking for a Parity Error on devices and
40429 * bridges.
40430@@ -681,7 +681,7 @@ void edac_pci_do_parity_check(void)
40431 /* Only if operator has selected panic on PCI Error */
40432 if (edac_pci_get_panic_on_pe()) {
40433 /* If the count is different 'after' from 'before' */
40434- if (before_count != atomic_read(&pci_parity_count))
40435+ if (before_count != atomic_read_unchecked(&pci_parity_count))
40436 panic("EDAC: PCI Parity Error");
40437 }
40438 }
40439diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
40440index c2359a1..8bd119d 100644
40441--- a/drivers/edac/mce_amd.h
40442+++ b/drivers/edac/mce_amd.h
40443@@ -74,7 +74,7 @@ struct amd_decoder_ops {
40444 bool (*mc0_mce)(u16, u8);
40445 bool (*mc1_mce)(u16, u8);
40446 bool (*mc2_mce)(u16, u8);
40447-};
40448+} __no_const;
40449
40450 void amd_report_gart_errors(bool);
40451 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
40452diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
40453index 57ea7f4..af06b76 100644
40454--- a/drivers/firewire/core-card.c
40455+++ b/drivers/firewire/core-card.c
40456@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
40457 const struct fw_card_driver *driver,
40458 struct device *device)
40459 {
40460- static atomic_t index = ATOMIC_INIT(-1);
40461+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
40462
40463- card->index = atomic_inc_return(&index);
40464+ card->index = atomic_inc_return_unchecked(&index);
40465 card->driver = driver;
40466 card->device = device;
40467 card->current_tlabel = 0;
40468@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
40469
40470 void fw_core_remove_card(struct fw_card *card)
40471 {
40472- struct fw_card_driver dummy_driver = dummy_driver_template;
40473+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
40474
40475 card->driver->update_phy_reg(card, 4,
40476 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
40477diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
40478index f9e3aee..269dbdb 100644
40479--- a/drivers/firewire/core-device.c
40480+++ b/drivers/firewire/core-device.c
40481@@ -256,7 +256,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
40482 struct config_rom_attribute {
40483 struct device_attribute attr;
40484 u32 key;
40485-};
40486+} __do_const;
40487
40488 static ssize_t show_immediate(struct device *dev,
40489 struct device_attribute *dattr, char *buf)
40490diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
40491index d6a09b9..18e90dd 100644
40492--- a/drivers/firewire/core-transaction.c
40493+++ b/drivers/firewire/core-transaction.c
40494@@ -38,6 +38,7 @@
40495 #include <linux/timer.h>
40496 #include <linux/types.h>
40497 #include <linux/workqueue.h>
40498+#include <linux/sched.h>
40499
40500 #include <asm/byteorder.h>
40501
40502diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
40503index e1480ff6..1a429bd 100644
40504--- a/drivers/firewire/core.h
40505+++ b/drivers/firewire/core.h
40506@@ -111,6 +111,7 @@ struct fw_card_driver {
40507
40508 int (*stop_iso)(struct fw_iso_context *ctx);
40509 };
40510+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
40511
40512 void fw_card_initialize(struct fw_card *card,
40513 const struct fw_card_driver *driver, struct device *device);
40514diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
40515index f51d376..b118e40 100644
40516--- a/drivers/firewire/ohci.c
40517+++ b/drivers/firewire/ohci.c
40518@@ -2049,10 +2049,12 @@ static void bus_reset_work(struct work_struct *work)
40519 be32_to_cpu(ohci->next_header));
40520 }
40521
40522+#ifndef CONFIG_GRKERNSEC
40523 if (param_remote_dma) {
40524 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
40525 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
40526 }
40527+#endif
40528
40529 spin_unlock_irq(&ohci->lock);
40530
40531@@ -2584,8 +2586,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
40532 unsigned long flags;
40533 int n, ret = 0;
40534
40535+#ifndef CONFIG_GRKERNSEC
40536 if (param_remote_dma)
40537 return 0;
40538+#endif
40539
40540 /*
40541 * FIXME: Make sure this bitmask is cleared when we clear the busReset
40542diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
40543index 94a58a0..f5eba42 100644
40544--- a/drivers/firmware/dmi-id.c
40545+++ b/drivers/firmware/dmi-id.c
40546@@ -16,7 +16,7 @@
40547 struct dmi_device_attribute{
40548 struct device_attribute dev_attr;
40549 int field;
40550-};
40551+} __do_const;
40552 #define to_dmi_dev_attr(_dev_attr) \
40553 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
40554
40555diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
40556index 4fd9961..52d60ce 100644
40557--- a/drivers/firmware/efi/cper.c
40558+++ b/drivers/firmware/efi/cper.c
40559@@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
40560 */
40561 u64 cper_next_record_id(void)
40562 {
40563- static atomic64_t seq;
40564+ static atomic64_unchecked_t seq;
40565
40566- if (!atomic64_read(&seq))
40567- atomic64_set(&seq, ((u64)get_seconds()) << 32);
40568+ if (!atomic64_read_unchecked(&seq))
40569+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
40570
40571- return atomic64_inc_return(&seq);
40572+ return atomic64_inc_return_unchecked(&seq);
40573 }
40574 EXPORT_SYMBOL_GPL(cper_next_record_id);
40575
40576diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
40577index 3061bb8..92b5fcc 100644
40578--- a/drivers/firmware/efi/efi.c
40579+++ b/drivers/firmware/efi/efi.c
40580@@ -160,14 +160,16 @@ static struct attribute_group efi_subsys_attr_group = {
40581 };
40582
40583 static struct efivars generic_efivars;
40584-static struct efivar_operations generic_ops;
40585+static efivar_operations_no_const generic_ops __read_only;
40586
40587 static int generic_ops_register(void)
40588 {
40589- generic_ops.get_variable = efi.get_variable;
40590- generic_ops.set_variable = efi.set_variable;
40591- generic_ops.get_next_variable = efi.get_next_variable;
40592- generic_ops.query_variable_store = efi_query_variable_store;
40593+ pax_open_kernel();
40594+ *(void **)&generic_ops.get_variable = efi.get_variable;
40595+ *(void **)&generic_ops.set_variable = efi.set_variable;
40596+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
40597+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
40598+ pax_close_kernel();
40599
40600 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
40601 }
40602diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
40603index 7b2e049..a253334 100644
40604--- a/drivers/firmware/efi/efivars.c
40605+++ b/drivers/firmware/efi/efivars.c
40606@@ -589,7 +589,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
40607 static int
40608 create_efivars_bin_attributes(void)
40609 {
40610- struct bin_attribute *attr;
40611+ bin_attribute_no_const *attr;
40612 int error;
40613
40614 /* new_var */
40615diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
40616index 87b8e3b..c4afb35 100644
40617--- a/drivers/firmware/efi/runtime-map.c
40618+++ b/drivers/firmware/efi/runtime-map.c
40619@@ -97,7 +97,7 @@ static void map_release(struct kobject *kobj)
40620 kfree(entry);
40621 }
40622
40623-static struct kobj_type __refdata map_ktype = {
40624+static const struct kobj_type __refconst map_ktype = {
40625 .sysfs_ops = &map_attr_ops,
40626 .default_attrs = def_attrs,
40627 .release = map_release,
40628diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
40629index f1ab05e..ab51228 100644
40630--- a/drivers/firmware/google/gsmi.c
40631+++ b/drivers/firmware/google/gsmi.c
40632@@ -709,7 +709,7 @@ static u32 __init hash_oem_table_id(char s[8])
40633 return local_hash_64(input, 32);
40634 }
40635
40636-static struct dmi_system_id gsmi_dmi_table[] __initdata = {
40637+static const struct dmi_system_id gsmi_dmi_table[] __initconst = {
40638 {
40639 .ident = "Google Board",
40640 .matches = {
40641diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
40642index 2f569aa..26e4f39 100644
40643--- a/drivers/firmware/google/memconsole.c
40644+++ b/drivers/firmware/google/memconsole.c
40645@@ -136,7 +136,7 @@ static bool __init found_memconsole(void)
40646 return false;
40647 }
40648
40649-static struct dmi_system_id memconsole_dmi_table[] __initdata = {
40650+static const struct dmi_system_id memconsole_dmi_table[] __initconst = {
40651 {
40652 .ident = "Google Board",
40653 .matches = {
40654@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
40655 if (!found_memconsole())
40656 return -ENODEV;
40657
40658- memconsole_bin_attr.size = memconsole_length;
40659+ pax_open_kernel();
40660+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
40661+ pax_close_kernel();
40662+
40663 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
40664 }
40665
40666diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
40667index cc016c61..d35279e 100644
40668--- a/drivers/firmware/memmap.c
40669+++ b/drivers/firmware/memmap.c
40670@@ -124,7 +124,7 @@ static void __meminit release_firmware_map_entry(struct kobject *kobj)
40671 kfree(entry);
40672 }
40673
40674-static struct kobj_type __refdata memmap_ktype = {
40675+static const struct kobj_type __refconst memmap_ktype = {
40676 .release = release_firmware_map_entry,
40677 .sysfs_ops = &memmap_attr_ops,
40678 .default_attrs = def_attrs,
40679diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
40680index 3cfcfc6..09d6f117 100644
40681--- a/drivers/gpio/gpio-em.c
40682+++ b/drivers/gpio/gpio-em.c
40683@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
40684 struct em_gio_priv *p;
40685 struct resource *io[2], *irq[2];
40686 struct gpio_chip *gpio_chip;
40687- struct irq_chip *irq_chip;
40688+ irq_chip_no_const *irq_chip;
40689 const char *name = dev_name(&pdev->dev);
40690 int ret;
40691
40692diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
40693index 7818cd1..1be40e5 100644
40694--- a/drivers/gpio/gpio-ich.c
40695+++ b/drivers/gpio/gpio-ich.c
40696@@ -94,7 +94,7 @@ struct ichx_desc {
40697 * this option allows driver caching written output values
40698 */
40699 bool use_outlvl_cache;
40700-};
40701+} __do_const;
40702
40703 static struct {
40704 spinlock_t lock;
40705diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
40706index f476ae2..05e1bdd 100644
40707--- a/drivers/gpio/gpio-omap.c
40708+++ b/drivers/gpio/gpio-omap.c
40709@@ -1188,7 +1188,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
40710 const struct omap_gpio_platform_data *pdata;
40711 struct resource *res;
40712 struct gpio_bank *bank;
40713- struct irq_chip *irqc;
40714+ irq_chip_no_const *irqc;
40715 int ret;
40716
40717 match = of_match_device(of_match_ptr(omap_gpio_match), dev);
40718diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
40719index c49522e..9a7ee54 100644
40720--- a/drivers/gpio/gpio-rcar.c
40721+++ b/drivers/gpio/gpio-rcar.c
40722@@ -348,7 +348,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
40723 struct gpio_rcar_priv *p;
40724 struct resource *io, *irq;
40725 struct gpio_chip *gpio_chip;
40726- struct irq_chip *irq_chip;
40727+ irq_chip_no_const *irq_chip;
40728 struct device *dev = &pdev->dev;
40729 const char *name = dev_name(dev);
40730 int ret;
40731diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
40732index c1caa45..f0f97d2 100644
40733--- a/drivers/gpio/gpio-vr41xx.c
40734+++ b/drivers/gpio/gpio-vr41xx.c
40735@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
40736 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
40737 maskl, pendl, maskh, pendh);
40738
40739- atomic_inc(&irq_err_count);
40740+ atomic_inc_unchecked(&irq_err_count);
40741
40742 return -EINVAL;
40743 }
40744diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
40745index 1ca9295..9f3d481 100644
40746--- a/drivers/gpio/gpiolib.c
40747+++ b/drivers/gpio/gpiolib.c
40748@@ -554,8 +554,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
40749 }
40750
40751 if (gpiochip->irqchip) {
40752- gpiochip->irqchip->irq_request_resources = NULL;
40753- gpiochip->irqchip->irq_release_resources = NULL;
40754+ pax_open_kernel();
40755+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
40756+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
40757+ pax_close_kernel();
40758 gpiochip->irqchip = NULL;
40759 }
40760 }
40761@@ -621,8 +623,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
40762 gpiochip->irqchip = NULL;
40763 return -EINVAL;
40764 }
40765- irqchip->irq_request_resources = gpiochip_irq_reqres;
40766- irqchip->irq_release_resources = gpiochip_irq_relres;
40767+
40768+ pax_open_kernel();
40769+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
40770+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
40771+ pax_close_kernel();
40772
40773 /*
40774 * Prepare the mapping since the irqchip shall be orthogonal to
40775diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40776index 488f51d..301d462 100644
40777--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40778+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
40779@@ -118,7 +118,7 @@ struct device_queue_manager_ops {
40780 enum cache_policy alternate_policy,
40781 void __user *alternate_aperture_base,
40782 uint64_t alternate_aperture_size);
40783-};
40784+} __no_const;
40785
40786 /**
40787 * struct device_queue_manager
40788diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40789index 5940531..a75b0e5 100644
40790--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40791+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
40792@@ -62,7 +62,7 @@ struct kernel_queue_ops {
40793
40794 void (*submit_packet)(struct kernel_queue *kq);
40795 void (*rollback_packet)(struct kernel_queue *kq);
40796-};
40797+} __no_const;
40798
40799 struct kernel_queue {
40800 struct kernel_queue_ops ops;
40801diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
40802index 9b23525..65f4110 100644
40803--- a/drivers/gpu/drm/drm_context.c
40804+++ b/drivers/gpu/drm/drm_context.c
40805@@ -53,6 +53,9 @@ struct drm_ctx_list {
40806 */
40807 void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
40808 {
40809+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40810+ return;
40811+
40812 mutex_lock(&dev->struct_mutex);
40813 idr_remove(&dev->ctx_idr, ctx_handle);
40814 mutex_unlock(&dev->struct_mutex);
40815@@ -87,6 +90,9 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
40816 */
40817 int drm_legacy_ctxbitmap_init(struct drm_device * dev)
40818 {
40819+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40820+ return -EINVAL;
40821+
40822 idr_init(&dev->ctx_idr);
40823 return 0;
40824 }
40825@@ -101,6 +107,9 @@ int drm_legacy_ctxbitmap_init(struct drm_device * dev)
40826 */
40827 void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
40828 {
40829+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40830+ return;
40831+
40832 mutex_lock(&dev->struct_mutex);
40833 idr_destroy(&dev->ctx_idr);
40834 mutex_unlock(&dev->struct_mutex);
40835@@ -119,11 +128,14 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
40836 {
40837 struct drm_ctx_list *pos, *tmp;
40838
40839+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40840+ return;
40841+
40842 mutex_lock(&dev->ctxlist_mutex);
40843
40844 list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
40845 if (pos->tag == file &&
40846- pos->handle != DRM_KERNEL_CONTEXT) {
40847+ _DRM_LOCKING_CONTEXT(pos->handle) != DRM_KERNEL_CONTEXT) {
40848 if (dev->driver->context_dtor)
40849 dev->driver->context_dtor(dev, pos->handle);
40850
40851@@ -161,6 +173,9 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data,
40852 struct drm_local_map *map;
40853 struct drm_map_list *_entry;
40854
40855+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40856+ return -EINVAL;
40857+
40858 mutex_lock(&dev->struct_mutex);
40859
40860 map = idr_find(&dev->ctx_idr, request->ctx_id);
40861@@ -205,6 +220,9 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data,
40862 struct drm_local_map *map = NULL;
40863 struct drm_map_list *r_list = NULL;
40864
40865+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40866+ return -EINVAL;
40867+
40868 mutex_lock(&dev->struct_mutex);
40869 list_for_each_entry(r_list, &dev->maplist, head) {
40870 if (r_list->map
40871@@ -277,7 +295,13 @@ static int drm_context_switch_complete(struct drm_device *dev,
40872 {
40873 dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
40874
40875- if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
40876+ if (file_priv->master->lock.hw_lock == NULL) {
40877+ DRM_ERROR(
40878+ "Device has been unregistered. Hard exit. Process %d\n",
40879+ task_pid_nr(current));
40880+ send_sig(SIGTERM, current, 0);
40881+ return -EPERM;
40882+ } else if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
40883 DRM_ERROR("Lock isn't held after context switch\n");
40884 }
40885
40886@@ -305,6 +329,9 @@ int drm_legacy_resctx(struct drm_device *dev, void *data,
40887 struct drm_ctx ctx;
40888 int i;
40889
40890+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40891+ return -EINVAL;
40892+
40893 if (res->count >= DRM_RESERVED_CONTEXTS) {
40894 memset(&ctx, 0, sizeof(ctx));
40895 for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
40896@@ -335,8 +362,11 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
40897 struct drm_ctx_list *ctx_entry;
40898 struct drm_ctx *ctx = data;
40899
40900+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40901+ return -EINVAL;
40902+
40903 ctx->handle = drm_legacy_ctxbitmap_next(dev);
40904- if (ctx->handle == DRM_KERNEL_CONTEXT) {
40905+ if (_DRM_LOCKING_CONTEXT(ctx->handle) == DRM_KERNEL_CONTEXT) {
40906 /* Skip kernel's context and get a new one. */
40907 ctx->handle = drm_legacy_ctxbitmap_next(dev);
40908 }
40909@@ -378,6 +408,9 @@ int drm_legacy_getctx(struct drm_device *dev, void *data,
40910 {
40911 struct drm_ctx *ctx = data;
40912
40913+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40914+ return -EINVAL;
40915+
40916 /* This is 0, because we don't handle any context flags */
40917 ctx->flags = 0;
40918
40919@@ -400,6 +433,9 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data,
40920 {
40921 struct drm_ctx *ctx = data;
40922
40923+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40924+ return -EINVAL;
40925+
40926 DRM_DEBUG("%d\n", ctx->handle);
40927 return drm_context_switch(dev, dev->last_context, ctx->handle);
40928 }
40929@@ -420,6 +456,9 @@ int drm_legacy_newctx(struct drm_device *dev, void *data,
40930 {
40931 struct drm_ctx *ctx = data;
40932
40933+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40934+ return -EINVAL;
40935+
40936 DRM_DEBUG("%d\n", ctx->handle);
40937 drm_context_switch_complete(dev, file_priv, ctx->handle);
40938
40939@@ -442,8 +481,11 @@ int drm_legacy_rmctx(struct drm_device *dev, void *data,
40940 {
40941 struct drm_ctx *ctx = data;
40942
40943+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
40944+ return -EINVAL;
40945+
40946 DRM_DEBUG("%d\n", ctx->handle);
40947- if (ctx->handle != DRM_KERNEL_CONTEXT) {
40948+ if (_DRM_LOCKING_CONTEXT(ctx->handle) != DRM_KERNEL_CONTEXT) {
40949 if (dev->driver->context_dtor)
40950 dev->driver->context_dtor(dev, ctx->handle);
40951 drm_legacy_ctxbitmap_free(dev, ctx->handle);
40952diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
40953index b6f076b..2918de2 100644
40954--- a/drivers/gpu/drm/drm_crtc.c
40955+++ b/drivers/gpu/drm/drm_crtc.c
40956@@ -4118,7 +4118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
40957 goto done;
40958 }
40959
40960- if (copy_to_user(&enum_ptr[copied].name,
40961+ if (copy_to_user(enum_ptr[copied].name,
40962 &prop_enum->name, DRM_PROP_NAME_LEN)) {
40963 ret = -EFAULT;
40964 goto done;
40965diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
40966index d512134..a80a8e4 100644
40967--- a/drivers/gpu/drm/drm_drv.c
40968+++ b/drivers/gpu/drm/drm_drv.c
40969@@ -448,7 +448,7 @@ void drm_unplug_dev(struct drm_device *dev)
40970
40971 drm_device_set_unplugged(dev);
40972
40973- if (dev->open_count == 0) {
40974+ if (local_read(&dev->open_count) == 0) {
40975 drm_put_dev(dev);
40976 }
40977 mutex_unlock(&drm_global_mutex);
40978@@ -596,10 +596,13 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
40979 if (drm_ht_create(&dev->map_hash, 12))
40980 goto err_minors;
40981
40982- ret = drm_legacy_ctxbitmap_init(dev);
40983- if (ret) {
40984- DRM_ERROR("Cannot allocate memory for context bitmap.\n");
40985- goto err_ht;
40986+ if (drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT)) {
40987+ ret = drm_legacy_ctxbitmap_init(dev);
40988+ if (ret) {
40989+ DRM_ERROR(
40990+ "Cannot allocate memory for context bitmap.\n");
40991+ goto err_ht;
40992+ }
40993 }
40994
40995 if (drm_core_check_feature(dev, DRIVER_GEM)) {
40996diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
40997index 076dd60..e4a4ba7 100644
40998--- a/drivers/gpu/drm/drm_fops.c
40999+++ b/drivers/gpu/drm/drm_fops.c
41000@@ -89,7 +89,7 @@ int drm_open(struct inode *inode, struct file *filp)
41001 return PTR_ERR(minor);
41002
41003 dev = minor->dev;
41004- if (!dev->open_count++)
41005+ if (local_inc_return(&dev->open_count) == 1)
41006 need_setup = 1;
41007
41008 /* share address_space across all char-devs of a single device */
41009@@ -106,7 +106,7 @@ int drm_open(struct inode *inode, struct file *filp)
41010 return 0;
41011
41012 err_undo:
41013- dev->open_count--;
41014+ local_dec(&dev->open_count);
41015 drm_minor_release(minor);
41016 return retcode;
41017 }
41018@@ -376,7 +376,7 @@ int drm_release(struct inode *inode, struct file *filp)
41019
41020 mutex_lock(&drm_global_mutex);
41021
41022- DRM_DEBUG("open_count = %d\n", dev->open_count);
41023+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
41024
41025 mutex_lock(&dev->struct_mutex);
41026 list_del(&file_priv->lhead);
41027@@ -389,10 +389,10 @@ int drm_release(struct inode *inode, struct file *filp)
41028 * Begin inline drm_release
41029 */
41030
41031- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
41032+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
41033 task_pid_nr(current),
41034 (long)old_encode_dev(file_priv->minor->kdev->devt),
41035- dev->open_count);
41036+ local_read(&dev->open_count));
41037
41038 /* Release any auth tokens that might point to this file_priv,
41039 (do that under the drm_global_mutex) */
41040@@ -465,7 +465,7 @@ int drm_release(struct inode *inode, struct file *filp)
41041 * End inline drm_release
41042 */
41043
41044- if (!--dev->open_count) {
41045+ if (local_dec_and_test(&dev->open_count)) {
41046 retcode = drm_lastclose(dev);
41047 if (drm_device_is_unplugged(dev))
41048 drm_put_dev(dev);
41049diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
41050index 3d2e91c..d31c4c9 100644
41051--- a/drivers/gpu/drm/drm_global.c
41052+++ b/drivers/gpu/drm/drm_global.c
41053@@ -36,7 +36,7 @@
41054 struct drm_global_item {
41055 struct mutex mutex;
41056 void *object;
41057- int refcount;
41058+ atomic_t refcount;
41059 };
41060
41061 static struct drm_global_item glob[DRM_GLOBAL_NUM];
41062@@ -49,7 +49,7 @@ void drm_global_init(void)
41063 struct drm_global_item *item = &glob[i];
41064 mutex_init(&item->mutex);
41065 item->object = NULL;
41066- item->refcount = 0;
41067+ atomic_set(&item->refcount, 0);
41068 }
41069 }
41070
41071@@ -59,7 +59,7 @@ void drm_global_release(void)
41072 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
41073 struct drm_global_item *item = &glob[i];
41074 BUG_ON(item->object != NULL);
41075- BUG_ON(item->refcount != 0);
41076+ BUG_ON(atomic_read(&item->refcount) != 0);
41077 }
41078 }
41079
41080@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
41081 struct drm_global_item *item = &glob[ref->global_type];
41082
41083 mutex_lock(&item->mutex);
41084- if (item->refcount == 0) {
41085+ if (atomic_read(&item->refcount) == 0) {
41086 item->object = kzalloc(ref->size, GFP_KERNEL);
41087 if (unlikely(item->object == NULL)) {
41088 ret = -ENOMEM;
41089@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
41090 goto out_err;
41091
41092 }
41093- ++item->refcount;
41094+ atomic_inc(&item->refcount);
41095 ref->object = item->object;
41096 mutex_unlock(&item->mutex);
41097 return 0;
41098@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
41099 struct drm_global_item *item = &glob[ref->global_type];
41100
41101 mutex_lock(&item->mutex);
41102- BUG_ON(item->refcount == 0);
41103+ BUG_ON(atomic_read(&item->refcount) == 0);
41104 BUG_ON(ref->object != item->object);
41105- if (--item->refcount == 0) {
41106+ if (atomic_dec_and_test(&item->refcount)) {
41107 ref->release(ref);
41108 item->object = NULL;
41109 }
41110diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
41111index f1b32f9..394f791 100644
41112--- a/drivers/gpu/drm/drm_info.c
41113+++ b/drivers/gpu/drm/drm_info.c
41114@@ -76,10 +76,13 @@ int drm_vm_info(struct seq_file *m, void *data)
41115 struct drm_local_map *map;
41116 struct drm_map_list *r_list;
41117
41118- /* Hardcoded from _DRM_FRAME_BUFFER,
41119- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
41120- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
41121- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
41122+ static const char * const types[] = {
41123+ [_DRM_FRAME_BUFFER] = "FB",
41124+ [_DRM_REGISTERS] = "REG",
41125+ [_DRM_SHM] = "SHM",
41126+ [_DRM_AGP] = "AGP",
41127+ [_DRM_SCATTER_GATHER] = "SG",
41128+ [_DRM_CONSISTENT] = "PCI"};
41129 const char *type;
41130 int i;
41131
41132@@ -90,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
41133 map = r_list->map;
41134 if (!map)
41135 continue;
41136- if (map->type < 0 || map->type > 5)
41137+ if (map->type >= ARRAY_SIZE(types))
41138 type = "??";
41139 else
41140 type = types[map->type];
41141diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
41142index 2f4c4343..dd12cd2 100644
41143--- a/drivers/gpu/drm/drm_ioc32.c
41144+++ b/drivers/gpu/drm/drm_ioc32.c
41145@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
41146 request = compat_alloc_user_space(nbytes);
41147 if (!access_ok(VERIFY_WRITE, request, nbytes))
41148 return -EFAULT;
41149- list = (struct drm_buf_desc *) (request + 1);
41150+ list = (struct drm_buf_desc __user *) (request + 1);
41151
41152 if (__put_user(count, &request->count)
41153 || __put_user(list, &request->list))
41154@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
41155 request = compat_alloc_user_space(nbytes);
41156 if (!access_ok(VERIFY_WRITE, request, nbytes))
41157 return -EFAULT;
41158- list = (struct drm_buf_pub *) (request + 1);
41159+ list = (struct drm_buf_pub __user *) (request + 1);
41160
41161 if (__put_user(count, &request->count)
41162 || __put_user(list, &request->list))
41163@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
41164 return 0;
41165 }
41166
41167-drm_ioctl_compat_t *drm_compat_ioctls[] = {
41168+drm_ioctl_compat_t drm_compat_ioctls[] = {
41169 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
41170 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
41171 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
41172@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
41173 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41174 {
41175 unsigned int nr = DRM_IOCTL_NR(cmd);
41176- drm_ioctl_compat_t *fn;
41177 int ret;
41178
41179 /* Assume that ioctls without an explicit compat routine will just
41180@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41181 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
41182 return drm_ioctl(filp, cmd, arg);
41183
41184- fn = drm_compat_ioctls[nr];
41185-
41186- if (fn != NULL)
41187- ret = (*fn) (filp, cmd, arg);
41188+ if (drm_compat_ioctls[nr] != NULL)
41189+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
41190 else
41191 ret = drm_ioctl(filp, cmd, arg);
41192
41193diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
41194index 3785d66..1c489ef 100644
41195--- a/drivers/gpu/drm/drm_ioctl.c
41196+++ b/drivers/gpu/drm/drm_ioctl.c
41197@@ -655,7 +655,7 @@ long drm_ioctl(struct file *filp,
41198 struct drm_file *file_priv = filp->private_data;
41199 struct drm_device *dev;
41200 const struct drm_ioctl_desc *ioctl = NULL;
41201- drm_ioctl_t *func;
41202+ drm_ioctl_no_const_t func;
41203 unsigned int nr = DRM_IOCTL_NR(cmd);
41204 int retcode = -EINVAL;
41205 char stack_kdata[128];
41206diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
41207index f861361..b61d4c7 100644
41208--- a/drivers/gpu/drm/drm_lock.c
41209+++ b/drivers/gpu/drm/drm_lock.c
41210@@ -61,9 +61,12 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
41211 struct drm_master *master = file_priv->master;
41212 int ret = 0;
41213
41214+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
41215+ return -EINVAL;
41216+
41217 ++file_priv->lock_count;
41218
41219- if (lock->context == DRM_KERNEL_CONTEXT) {
41220+ if (_DRM_LOCKING_CONTEXT(lock->context) == DRM_KERNEL_CONTEXT) {
41221 DRM_ERROR("Process %d using kernel context %d\n",
41222 task_pid_nr(current), lock->context);
41223 return -EINVAL;
41224@@ -153,12 +156,23 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
41225 struct drm_lock *lock = data;
41226 struct drm_master *master = file_priv->master;
41227
41228- if (lock->context == DRM_KERNEL_CONTEXT) {
41229+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT))
41230+ return -EINVAL;
41231+
41232+ if (_DRM_LOCKING_CONTEXT(lock->context) == DRM_KERNEL_CONTEXT) {
41233 DRM_ERROR("Process %d using kernel context %d\n",
41234 task_pid_nr(current), lock->context);
41235 return -EINVAL;
41236 }
41237
41238+ if (!master->lock.hw_lock) {
41239+ DRM_ERROR(
41240+ "Device has been unregistered. Hard exit. Process %d\n",
41241+ task_pid_nr(current));
41242+ send_sig(SIGTERM, current, 0);
41243+ return -EPERM;
41244+ }
41245+
41246 if (drm_legacy_lock_free(&master->lock, lock->context)) {
41247 /* FIXME: Should really bail out here. */
41248 }
41249diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
41250index d4813e0..6c1ab4d 100644
41251--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
41252+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
41253@@ -825,10 +825,16 @@ void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
41254 u32 pipeconf_reg = PIPEACONF;
41255 u32 dspcntr_reg = DSPACNTR;
41256
41257- u32 pipeconf = dev_priv->pipeconf[pipe];
41258- u32 dspcntr = dev_priv->dspcntr[pipe];
41259+ u32 pipeconf;
41260+ u32 dspcntr;
41261 u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
41262
41263+ if (pipe == -1)
41264+ return;
41265+
41266+ pipeconf = dev_priv->pipeconf[pipe];
41267+ dspcntr = dev_priv->dspcntr[pipe];
41268+
41269 if (pipe) {
41270 pipeconf_reg = PIPECCONF;
41271 dspcntr_reg = DSPCCNTR;
41272diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
41273index 93ec5dc..82acbaf 100644
41274--- a/drivers/gpu/drm/i810/i810_drv.h
41275+++ b/drivers/gpu/drm/i810/i810_drv.h
41276@@ -110,8 +110,8 @@ typedef struct drm_i810_private {
41277 int page_flipping;
41278
41279 wait_queue_head_t irq_queue;
41280- atomic_t irq_received;
41281- atomic_t irq_emitted;
41282+ atomic_unchecked_t irq_received;
41283+ atomic_unchecked_t irq_emitted;
41284
41285 int front_offset;
41286 } drm_i810_private_t;
41287diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
41288index 1a46787..7fb387c 100644
41289--- a/drivers/gpu/drm/i915/i915_dma.c
41290+++ b/drivers/gpu/drm/i915/i915_dma.c
41291@@ -149,6 +149,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
41292 case I915_PARAM_MMAP_VERSION:
41293 value = 1;
41294 break;
41295+ case I915_PARAM_HAS_LEGACY_CONTEXT:
41296+ value = drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT);
41297+ break;
41298 default:
41299 DRM_DEBUG("Unknown parameter %d\n", param->param);
41300 return -EINVAL;
41301@@ -362,7 +365,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
41302 * locking inversion with the driver load path. And the access here is
41303 * completely racy anyway. So don't bother with locking for now.
41304 */
41305- return dev->open_count == 0;
41306+ return local_read(&dev->open_count) == 0;
41307 }
41308
41309 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
41310diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41311index 38a7425..5322b16 100644
41312--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41313+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41314@@ -872,12 +872,12 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
41315 static int
41316 validate_exec_list(struct drm_device *dev,
41317 struct drm_i915_gem_exec_object2 *exec,
41318- int count)
41319+ unsigned int count)
41320 {
41321 unsigned relocs_total = 0;
41322 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
41323 unsigned invalid_flags;
41324- int i;
41325+ unsigned int i;
41326
41327 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
41328 if (USES_FULL_PPGTT(dev))
41329diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
41330index 176de63..b50b66a 100644
41331--- a/drivers/gpu/drm/i915/i915_ioc32.c
41332+++ b/drivers/gpu/drm/i915/i915_ioc32.c
41333@@ -62,7 +62,7 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
41334 || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
41335 || __put_user(batchbuffer32.num_cliprects,
41336 &batchbuffer->num_cliprects)
41337- || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
41338+ || __put_user((struct drm_clip_rect __user *)(unsigned long)batchbuffer32.cliprects,
41339 &batchbuffer->cliprects))
41340 return -EFAULT;
41341
41342@@ -91,13 +91,13 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
41343
41344 cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
41345 if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
41346- || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
41347+ || __put_user((char __user *)(unsigned long)cmdbuffer32.buf,
41348 &cmdbuffer->buf)
41349 || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
41350 || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
41351 || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
41352 || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
41353- || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
41354+ || __put_user((struct drm_clip_rect __user *)(unsigned long)cmdbuffer32.cliprects,
41355 &cmdbuffer->cliprects))
41356 return -EFAULT;
41357
41358@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
41359 (unsigned long)request);
41360 }
41361
41362-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
41363+static drm_ioctl_compat_t i915_compat_ioctls[] = {
41364 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
41365 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
41366 [DRM_I915_GETPARAM] = compat_i915_getparam,
41367@@ -201,17 +201,13 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
41368 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41369 {
41370 unsigned int nr = DRM_IOCTL_NR(cmd);
41371- drm_ioctl_compat_t *fn = NULL;
41372 int ret;
41373
41374 if (nr < DRM_COMMAND_BASE)
41375 return drm_compat_ioctl(filp, cmd, arg);
41376
41377- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
41378- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
41379-
41380- if (fn != NULL)
41381- ret = (*fn) (filp, cmd, arg);
41382+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls) && i915_compat_ioctls[nr - DRM_COMMAND_BASE])
41383+ ret = (*i915_compat_ioctls[nr - DRM_COMMAND_BASE])(filp, cmd, arg);
41384 else
41385 ret = drm_ioctl(filp, cmd, arg);
41386
41387diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
41388index f75173c..f283e45 100644
41389--- a/drivers/gpu/drm/i915/intel_display.c
41390+++ b/drivers/gpu/drm/i915/intel_display.c
41391@@ -13056,13 +13056,13 @@ struct intel_quirk {
41392 int subsystem_vendor;
41393 int subsystem_device;
41394 void (*hook)(struct drm_device *dev);
41395-};
41396+} __do_const;
41397
41398 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
41399 struct intel_dmi_quirk {
41400 void (*hook)(struct drm_device *dev);
41401 const struct dmi_system_id (*dmi_id_list)[];
41402-};
41403+} __do_const;
41404
41405 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
41406 {
41407@@ -13070,18 +13070,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
41408 return 1;
41409 }
41410
41411-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
41412+static const struct dmi_system_id intel_dmi_quirks_table[] = {
41413 {
41414- .dmi_id_list = &(const struct dmi_system_id[]) {
41415- {
41416- .callback = intel_dmi_reverse_brightness,
41417- .ident = "NCR Corporation",
41418- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
41419- DMI_MATCH(DMI_PRODUCT_NAME, ""),
41420- },
41421- },
41422- { } /* terminating entry */
41423+ .callback = intel_dmi_reverse_brightness,
41424+ .ident = "NCR Corporation",
41425+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
41426+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
41427 },
41428+ },
41429+ { } /* terminating entry */
41430+};
41431+
41432+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
41433+ {
41434+ .dmi_id_list = &intel_dmi_quirks_table,
41435 .hook = quirk_invert_brightness,
41436 },
41437 };
41438diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
41439index a002f53..0d60514 100644
41440--- a/drivers/gpu/drm/imx/imx-drm-core.c
41441+++ b/drivers/gpu/drm/imx/imx-drm-core.c
41442@@ -355,7 +355,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
41443 if (imxdrm->pipes >= MAX_CRTC)
41444 return -EINVAL;
41445
41446- if (imxdrm->drm->open_count)
41447+ if (local_read(&imxdrm->drm->open_count))
41448 return -EBUSY;
41449
41450 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
41451diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
41452index b4a20149..219ab78 100644
41453--- a/drivers/gpu/drm/mga/mga_drv.h
41454+++ b/drivers/gpu/drm/mga/mga_drv.h
41455@@ -122,9 +122,9 @@ typedef struct drm_mga_private {
41456 u32 clear_cmd;
41457 u32 maccess;
41458
41459- atomic_t vbl_received; /**< Number of vblanks received. */
41460+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
41461 wait_queue_head_t fence_queue;
41462- atomic_t last_fence_retired;
41463+ atomic_unchecked_t last_fence_retired;
41464 u32 next_fence_to_post;
41465
41466 unsigned int fb_cpp;
41467diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
41468index 729bfd5..14bae78 100644
41469--- a/drivers/gpu/drm/mga/mga_ioc32.c
41470+++ b/drivers/gpu/drm/mga/mga_ioc32.c
41471@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
41472 return 0;
41473 }
41474
41475-drm_ioctl_compat_t *mga_compat_ioctls[] = {
41476+drm_ioctl_compat_t mga_compat_ioctls[] = {
41477 [DRM_MGA_INIT] = compat_mga_init,
41478 [DRM_MGA_GETPARAM] = compat_mga_getparam,
41479 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
41480@@ -208,17 +208,13 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
41481 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41482 {
41483 unsigned int nr = DRM_IOCTL_NR(cmd);
41484- drm_ioctl_compat_t *fn = NULL;
41485 int ret;
41486
41487 if (nr < DRM_COMMAND_BASE)
41488 return drm_compat_ioctl(filp, cmd, arg);
41489
41490- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
41491- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
41492-
41493- if (fn != NULL)
41494- ret = (*fn) (filp, cmd, arg);
41495+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls) && mga_compat_ioctls[nr - DRM_COMMAND_BASE])
41496+ ret = (*mga_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
41497 else
41498 ret = drm_ioctl(filp, cmd, arg);
41499
41500diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
41501index 1b071b8..de8601a 100644
41502--- a/drivers/gpu/drm/mga/mga_irq.c
41503+++ b/drivers/gpu/drm/mga/mga_irq.c
41504@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
41505 if (crtc != 0)
41506 return 0;
41507
41508- return atomic_read(&dev_priv->vbl_received);
41509+ return atomic_read_unchecked(&dev_priv->vbl_received);
41510 }
41511
41512
41513@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
41514 /* VBLANK interrupt */
41515 if (status & MGA_VLINEPEN) {
41516 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
41517- atomic_inc(&dev_priv->vbl_received);
41518+ atomic_inc_unchecked(&dev_priv->vbl_received);
41519 drm_handle_vblank(dev, 0);
41520 handled = 1;
41521 }
41522@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
41523 if ((prim_start & ~0x03) != (prim_end & ~0x03))
41524 MGA_WRITE(MGA_PRIMEND, prim_end);
41525
41526- atomic_inc(&dev_priv->last_fence_retired);
41527+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
41528 wake_up(&dev_priv->fence_queue);
41529 handled = 1;
41530 }
41531@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
41532 * using fences.
41533 */
41534 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
41535- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
41536+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
41537 - *sequence) <= (1 << 23)));
41538
41539 *sequence = cur_fence;
41540diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
41541index 0190b69..60c3eaf 100644
41542--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
41543+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
41544@@ -963,7 +963,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
41545 struct bit_table {
41546 const char id;
41547 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
41548-};
41549+} __no_const;
41550
41551 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
41552
41553diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
41554index 8763deb..936b423 100644
41555--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
41556+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
41557@@ -940,7 +940,8 @@ static struct drm_driver
41558 driver_stub = {
41559 .driver_features =
41560 DRIVER_USE_AGP |
41561- DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
41562+ DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
41563+ DRIVER_KMS_LEGACY_CONTEXT,
41564
41565 .load = nouveau_drm_load,
41566 .unload = nouveau_drm_unload,
41567diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
41568index fc68f09..0511d71 100644
41569--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
41570+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
41571@@ -121,7 +121,6 @@ struct nouveau_drm {
41572 struct drm_global_reference mem_global_ref;
41573 struct ttm_bo_global_ref bo_global_ref;
41574 struct ttm_bo_device bdev;
41575- atomic_t validate_sequence;
41576 int (*move)(struct nouveau_channel *,
41577 struct ttm_buffer_object *,
41578 struct ttm_mem_reg *, struct ttm_mem_reg *);
41579diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41580index 462679a..88e32a7 100644
41581--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41582+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41583@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
41584 unsigned long arg)
41585 {
41586 unsigned int nr = DRM_IOCTL_NR(cmd);
41587- drm_ioctl_compat_t *fn = NULL;
41588+ drm_ioctl_compat_t fn = NULL;
41589 int ret;
41590
41591 if (nr < DRM_COMMAND_BASE)
41592diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
41593index 273e501..3b6c0a2 100644
41594--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
41595+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
41596@@ -127,11 +127,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41597 }
41598
41599 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
41600- nouveau_vram_manager_init,
41601- nouveau_vram_manager_fini,
41602- nouveau_vram_manager_new,
41603- nouveau_vram_manager_del,
41604- nouveau_vram_manager_debug
41605+ .init = nouveau_vram_manager_init,
41606+ .takedown = nouveau_vram_manager_fini,
41607+ .get_node = nouveau_vram_manager_new,
41608+ .put_node = nouveau_vram_manager_del,
41609+ .debug = nouveau_vram_manager_debug
41610 };
41611
41612 static int
41613@@ -195,11 +195,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41614 }
41615
41616 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
41617- nouveau_gart_manager_init,
41618- nouveau_gart_manager_fini,
41619- nouveau_gart_manager_new,
41620- nouveau_gart_manager_del,
41621- nouveau_gart_manager_debug
41622+ .init = nouveau_gart_manager_init,
41623+ .takedown = nouveau_gart_manager_fini,
41624+ .get_node = nouveau_gart_manager_new,
41625+ .put_node = nouveau_gart_manager_del,
41626+ .debug = nouveau_gart_manager_debug
41627 };
41628
41629 /*XXX*/
41630@@ -268,11 +268,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41631 }
41632
41633 const struct ttm_mem_type_manager_func nv04_gart_manager = {
41634- nv04_gart_manager_init,
41635- nv04_gart_manager_fini,
41636- nv04_gart_manager_new,
41637- nv04_gart_manager_del,
41638- nv04_gart_manager_debug
41639+ .init = nv04_gart_manager_init,
41640+ .takedown = nv04_gart_manager_fini,
41641+ .get_node = nv04_gart_manager_new,
41642+ .put_node = nv04_gart_manager_del,
41643+ .debug = nv04_gart_manager_debug
41644 };
41645
41646 int
41647diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
41648index c7592ec..dd45ebc 100644
41649--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
41650+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
41651@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
41652 * locking inversion with the driver load path. And the access here is
41653 * completely racy anyway. So don't bother with locking for now.
41654 */
41655- return dev->open_count == 0;
41656+ return local_read(&dev->open_count) == 0;
41657 }
41658
41659 static const struct vga_switcheroo_client_ops
41660diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
41661index 9782364..89bd954 100644
41662--- a/drivers/gpu/drm/qxl/qxl_cmd.c
41663+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
41664@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
41665 int ret;
41666
41667 mutex_lock(&qdev->async_io_mutex);
41668- irq_num = atomic_read(&qdev->irq_received_io_cmd);
41669+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
41670 if (qdev->last_sent_io_cmd > irq_num) {
41671 if (intr)
41672 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
41673- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41674+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41675 else
41676 ret = wait_event_timeout(qdev->io_cmd_event,
41677- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41678+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41679 /* 0 is timeout, just bail the "hw" has gone away */
41680 if (ret <= 0)
41681 goto out;
41682- irq_num = atomic_read(&qdev->irq_received_io_cmd);
41683+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
41684 }
41685 outb(val, addr);
41686 qdev->last_sent_io_cmd = irq_num + 1;
41687 if (intr)
41688 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
41689- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41690+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41691 else
41692 ret = wait_event_timeout(qdev->io_cmd_event,
41693- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41694+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41695 out:
41696 if (ret > 0)
41697 ret = 0;
41698diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
41699index 6911b8c..89d6867 100644
41700--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
41701+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
41702@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
41703 struct drm_info_node *node = (struct drm_info_node *) m->private;
41704 struct qxl_device *qdev = node->minor->dev->dev_private;
41705
41706- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
41707- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
41708- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
41709- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
41710+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
41711+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
41712+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
41713+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
41714 seq_printf(m, "%d\n", qdev->irq_received_error);
41715 return 0;
41716 }
41717diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
41718index 7c6cafe..460f542 100644
41719--- a/drivers/gpu/drm/qxl/qxl_drv.h
41720+++ b/drivers/gpu/drm/qxl/qxl_drv.h
41721@@ -290,10 +290,10 @@ struct qxl_device {
41722 unsigned int last_sent_io_cmd;
41723
41724 /* interrupt handling */
41725- atomic_t irq_received;
41726- atomic_t irq_received_display;
41727- atomic_t irq_received_cursor;
41728- atomic_t irq_received_io_cmd;
41729+ atomic_unchecked_t irq_received;
41730+ atomic_unchecked_t irq_received_display;
41731+ atomic_unchecked_t irq_received_cursor;
41732+ atomic_unchecked_t irq_received_io_cmd;
41733 unsigned irq_received_error;
41734 wait_queue_head_t display_event;
41735 wait_queue_head_t cursor_event;
41736diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
41737index b110883..dd06418 100644
41738--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
41739+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
41740@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41741
41742 /* TODO copy slow path code from i915 */
41743 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
41744- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
41745+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
41746
41747 {
41748 struct qxl_drawable *draw = fb_cmd;
41749@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41750 struct drm_qxl_reloc reloc;
41751
41752 if (copy_from_user(&reloc,
41753- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
41754+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
41755 sizeof(reloc))) {
41756 ret = -EFAULT;
41757 goto out_free_bos;
41758@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
41759
41760 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
41761
41762- struct drm_qxl_command *commands =
41763- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
41764+ struct drm_qxl_command __user *commands =
41765+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
41766
41767- if (copy_from_user(&user_cmd, &commands[cmd_num],
41768+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
41769 sizeof(user_cmd)))
41770 return -EFAULT;
41771
41772diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
41773index 0bf1e20..42a7310 100644
41774--- a/drivers/gpu/drm/qxl/qxl_irq.c
41775+++ b/drivers/gpu/drm/qxl/qxl_irq.c
41776@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
41777 if (!pending)
41778 return IRQ_NONE;
41779
41780- atomic_inc(&qdev->irq_received);
41781+ atomic_inc_unchecked(&qdev->irq_received);
41782
41783 if (pending & QXL_INTERRUPT_DISPLAY) {
41784- atomic_inc(&qdev->irq_received_display);
41785+ atomic_inc_unchecked(&qdev->irq_received_display);
41786 wake_up_all(&qdev->display_event);
41787 qxl_queue_garbage_collect(qdev, false);
41788 }
41789 if (pending & QXL_INTERRUPT_CURSOR) {
41790- atomic_inc(&qdev->irq_received_cursor);
41791+ atomic_inc_unchecked(&qdev->irq_received_cursor);
41792 wake_up_all(&qdev->cursor_event);
41793 }
41794 if (pending & QXL_INTERRUPT_IO_CMD) {
41795- atomic_inc(&qdev->irq_received_io_cmd);
41796+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
41797 wake_up_all(&qdev->io_cmd_event);
41798 }
41799 if (pending & QXL_INTERRUPT_ERROR) {
41800@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
41801 init_waitqueue_head(&qdev->io_cmd_event);
41802 INIT_WORK(&qdev->client_monitors_config_work,
41803 qxl_client_monitors_config_work_func);
41804- atomic_set(&qdev->irq_received, 0);
41805- atomic_set(&qdev->irq_received_display, 0);
41806- atomic_set(&qdev->irq_received_cursor, 0);
41807- atomic_set(&qdev->irq_received_io_cmd, 0);
41808+ atomic_set_unchecked(&qdev->irq_received, 0);
41809+ atomic_set_unchecked(&qdev->irq_received_display, 0);
41810+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
41811+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
41812 qdev->irq_received_error = 0;
41813 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
41814 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
41815diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
41816index 0cbc4c9..0e46686 100644
41817--- a/drivers/gpu/drm/qxl/qxl_ttm.c
41818+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
41819@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
41820 }
41821 }
41822
41823-static struct vm_operations_struct qxl_ttm_vm_ops;
41824+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
41825 static const struct vm_operations_struct *ttm_vm_ops;
41826
41827 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41828@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
41829 return r;
41830 if (unlikely(ttm_vm_ops == NULL)) {
41831 ttm_vm_ops = vma->vm_ops;
41832+ pax_open_kernel();
41833 qxl_ttm_vm_ops = *ttm_vm_ops;
41834 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
41835+ pax_close_kernel();
41836 }
41837 vma->vm_ops = &qxl_ttm_vm_ops;
41838 return 0;
41839@@ -464,25 +466,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
41840 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
41841 {
41842 #if defined(CONFIG_DEBUG_FS)
41843- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
41844- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
41845- unsigned i;
41846+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
41847+ {
41848+ .name = "qxl_mem_mm",
41849+ .show = &qxl_mm_dump_table,
41850+ },
41851+ {
41852+ .name = "qxl_surf_mm",
41853+ .show = &qxl_mm_dump_table,
41854+ }
41855+ };
41856
41857- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
41858- if (i == 0)
41859- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
41860- else
41861- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
41862- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
41863- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
41864- qxl_mem_types_list[i].driver_features = 0;
41865- if (i == 0)
41866- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41867- else
41868- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41869+ pax_open_kernel();
41870+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41871+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41872+ pax_close_kernel();
41873
41874- }
41875- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
41876+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
41877 #else
41878 return 0;
41879 #endif
41880diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
41881index 2c45ac9..5d740f8 100644
41882--- a/drivers/gpu/drm/r128/r128_cce.c
41883+++ b/drivers/gpu/drm/r128/r128_cce.c
41884@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
41885
41886 /* GH: Simple idle check.
41887 */
41888- atomic_set(&dev_priv->idle_count, 0);
41889+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41890
41891 /* We don't support anything other than bus-mastering ring mode,
41892 * but the ring can be in either AGP or PCI space for the ring
41893diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
41894index 723e5d6..102dbaf 100644
41895--- a/drivers/gpu/drm/r128/r128_drv.h
41896+++ b/drivers/gpu/drm/r128/r128_drv.h
41897@@ -93,14 +93,14 @@ typedef struct drm_r128_private {
41898 int is_pci;
41899 unsigned long cce_buffers_offset;
41900
41901- atomic_t idle_count;
41902+ atomic_unchecked_t idle_count;
41903
41904 int page_flipping;
41905 int current_page;
41906 u32 crtc_offset;
41907 u32 crtc_offset_cntl;
41908
41909- atomic_t vbl_received;
41910+ atomic_unchecked_t vbl_received;
41911
41912 u32 color_fmt;
41913 unsigned int front_offset;
41914diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
41915index 663f38c..ec159a1 100644
41916--- a/drivers/gpu/drm/r128/r128_ioc32.c
41917+++ b/drivers/gpu/drm/r128/r128_ioc32.c
41918@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
41919 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
41920 }
41921
41922-drm_ioctl_compat_t *r128_compat_ioctls[] = {
41923+drm_ioctl_compat_t r128_compat_ioctls[] = {
41924 [DRM_R128_INIT] = compat_r128_init,
41925 [DRM_R128_DEPTH] = compat_r128_depth,
41926 [DRM_R128_STIPPLE] = compat_r128_stipple,
41927@@ -197,17 +197,13 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
41928 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41929 {
41930 unsigned int nr = DRM_IOCTL_NR(cmd);
41931- drm_ioctl_compat_t *fn = NULL;
41932 int ret;
41933
41934 if (nr < DRM_COMMAND_BASE)
41935 return drm_compat_ioctl(filp, cmd, arg);
41936
41937- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
41938- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41939-
41940- if (fn != NULL)
41941- ret = (*fn) (filp, cmd, arg);
41942+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls) && r128_compat_ioctls[nr - DRM_COMMAND_BASE])
41943+ ret = (*r128_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
41944 else
41945 ret = drm_ioctl(filp, cmd, arg);
41946
41947diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
41948index c2ae496..30b5993 100644
41949--- a/drivers/gpu/drm/r128/r128_irq.c
41950+++ b/drivers/gpu/drm/r128/r128_irq.c
41951@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
41952 if (crtc != 0)
41953 return 0;
41954
41955- return atomic_read(&dev_priv->vbl_received);
41956+ return atomic_read_unchecked(&dev_priv->vbl_received);
41957 }
41958
41959 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41960@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41961 /* VBLANK interrupt */
41962 if (status & R128_CRTC_VBLANK_INT) {
41963 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
41964- atomic_inc(&dev_priv->vbl_received);
41965+ atomic_inc_unchecked(&dev_priv->vbl_received);
41966 drm_handle_vblank(dev, 0);
41967 return IRQ_HANDLED;
41968 }
41969diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
41970index 8fd2d9f..18c9660 100644
41971--- a/drivers/gpu/drm/r128/r128_state.c
41972+++ b/drivers/gpu/drm/r128/r128_state.c
41973@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
41974
41975 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
41976 {
41977- if (atomic_read(&dev_priv->idle_count) == 0)
41978+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
41979 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
41980 else
41981- atomic_set(&dev_priv->idle_count, 0);
41982+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41983 }
41984
41985 #endif
41986diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
41987index b928c17..e5d9400 100644
41988--- a/drivers/gpu/drm/radeon/mkregtable.c
41989+++ b/drivers/gpu/drm/radeon/mkregtable.c
41990@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
41991 regex_t mask_rex;
41992 regmatch_t match[4];
41993 char buf[1024];
41994- size_t end;
41995+ long end;
41996 int len;
41997 int done = 0;
41998 int r;
41999 unsigned o;
42000 struct offset *offset;
42001 char last_reg_s[10];
42002- int last_reg;
42003+ unsigned long last_reg;
42004
42005 if (regcomp
42006 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
42007diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
42008index aa232fd..7e5f6e1 100644
42009--- a/drivers/gpu/drm/radeon/radeon_device.c
42010+++ b/drivers/gpu/drm/radeon/radeon_device.c
42011@@ -1247,7 +1247,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
42012 * locking inversion with the driver load path. And the access here is
42013 * completely racy anyway. So don't bother with locking for now.
42014 */
42015- return dev->open_count == 0;
42016+ return local_read(&dev->open_count) == 0;
42017 }
42018
42019 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
42020diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
42021index 46bd393..6ae4719 100644
42022--- a/drivers/gpu/drm/radeon/radeon_drv.h
42023+++ b/drivers/gpu/drm/radeon/radeon_drv.h
42024@@ -264,7 +264,7 @@ typedef struct drm_radeon_private {
42025
42026 /* SW interrupt */
42027 wait_queue_head_t swi_queue;
42028- atomic_t swi_emitted;
42029+ atomic_unchecked_t swi_emitted;
42030 int vblank_crtc;
42031 uint32_t irq_enable_reg;
42032 uint32_t r500_disp_irq_reg;
42033diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
42034index 0b98ea1..a3c770f 100644
42035--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
42036+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
42037@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
42038 request = compat_alloc_user_space(sizeof(*request));
42039 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
42040 || __put_user(req32.param, &request->param)
42041- || __put_user((void __user *)(unsigned long)req32.value,
42042+ || __put_user((unsigned long)req32.value,
42043 &request->value))
42044 return -EFAULT;
42045
42046@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
42047 #define compat_radeon_cp_setparam NULL
42048 #endif /* X86_64 || IA64 */
42049
42050-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
42051+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
42052 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
42053 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
42054 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
42055@@ -393,17 +393,13 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
42056 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42057 {
42058 unsigned int nr = DRM_IOCTL_NR(cmd);
42059- drm_ioctl_compat_t *fn = NULL;
42060 int ret;
42061
42062 if (nr < DRM_COMMAND_BASE)
42063 return drm_compat_ioctl(filp, cmd, arg);
42064
42065- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
42066- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
42067-
42068- if (fn != NULL)
42069- ret = (*fn) (filp, cmd, arg);
42070+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls) && radeon_compat_ioctls[nr - DRM_COMMAND_BASE])
42071+ ret = (*radeon_compat_ioctls[nr - DRM_COMMAND_BASE]) (filp, cmd, arg);
42072 else
42073 ret = drm_ioctl(filp, cmd, arg);
42074
42075diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
42076index 244b19b..c19226d 100644
42077--- a/drivers/gpu/drm/radeon/radeon_irq.c
42078+++ b/drivers/gpu/drm/radeon/radeon_irq.c
42079@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
42080 unsigned int ret;
42081 RING_LOCALS;
42082
42083- atomic_inc(&dev_priv->swi_emitted);
42084- ret = atomic_read(&dev_priv->swi_emitted);
42085+ atomic_inc_unchecked(&dev_priv->swi_emitted);
42086+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
42087
42088 BEGIN_RING(4);
42089 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
42090@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
42091 drm_radeon_private_t *dev_priv =
42092 (drm_radeon_private_t *) dev->dev_private;
42093
42094- atomic_set(&dev_priv->swi_emitted, 0);
42095+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
42096 init_waitqueue_head(&dev_priv->swi_queue);
42097
42098 dev->max_vblank_count = 0x001fffff;
42099diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
42100index 15aee72..cda326e 100644
42101--- a/drivers/gpu/drm/radeon/radeon_state.c
42102+++ b/drivers/gpu/drm/radeon/radeon_state.c
42103@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
42104 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
42105 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
42106
42107- if (copy_from_user(&depth_boxes, clear->depth_boxes,
42108+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
42109 sarea_priv->nbox * sizeof(depth_boxes[0])))
42110 return -EFAULT;
42111
42112@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
42113 {
42114 drm_radeon_private_t *dev_priv = dev->dev_private;
42115 drm_radeon_getparam_t *param = data;
42116- int value;
42117+ int value = 0;
42118
42119 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
42120
42121diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
42122index edafd3c..3af7c9c 100644
42123--- a/drivers/gpu/drm/radeon/radeon_ttm.c
42124+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
42125@@ -961,7 +961,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
42126 man->size = size >> PAGE_SHIFT;
42127 }
42128
42129-static struct vm_operations_struct radeon_ttm_vm_ops;
42130+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
42131 static const struct vm_operations_struct *ttm_vm_ops = NULL;
42132
42133 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42134@@ -1002,8 +1002,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
42135 }
42136 if (unlikely(ttm_vm_ops == NULL)) {
42137 ttm_vm_ops = vma->vm_ops;
42138+ pax_open_kernel();
42139 radeon_ttm_vm_ops = *ttm_vm_ops;
42140 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
42141+ pax_close_kernel();
42142 }
42143 vma->vm_ops = &radeon_ttm_vm_ops;
42144 return 0;
42145diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
42146index 1a52522..8e78043 100644
42147--- a/drivers/gpu/drm/tegra/dc.c
42148+++ b/drivers/gpu/drm/tegra/dc.c
42149@@ -1585,7 +1585,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
42150 }
42151
42152 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
42153- dc->debugfs_files[i].data = dc;
42154+ *(void **)&dc->debugfs_files[i].data = dc;
42155
42156 err = drm_debugfs_create_files(dc->debugfs_files,
42157 ARRAY_SIZE(debugfs_files),
42158diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
42159index ed970f6..4eeea42 100644
42160--- a/drivers/gpu/drm/tegra/dsi.c
42161+++ b/drivers/gpu/drm/tegra/dsi.c
42162@@ -62,7 +62,7 @@ struct tegra_dsi {
42163 struct clk *clk_lp;
42164 struct clk *clk;
42165
42166- struct drm_info_list *debugfs_files;
42167+ drm_info_list_no_const *debugfs_files;
42168 struct drm_minor *minor;
42169 struct dentry *debugfs;
42170
42171diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
42172index 7eaaee74..cc2bc04 100644
42173--- a/drivers/gpu/drm/tegra/hdmi.c
42174+++ b/drivers/gpu/drm/tegra/hdmi.c
42175@@ -64,7 +64,7 @@ struct tegra_hdmi {
42176 bool stereo;
42177 bool dvi;
42178
42179- struct drm_info_list *debugfs_files;
42180+ drm_info_list_no_const *debugfs_files;
42181 struct drm_minor *minor;
42182 struct dentry *debugfs;
42183 };
42184diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
42185index aa0bd054..aea6a01 100644
42186--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
42187+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
42188@@ -148,10 +148,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
42189 }
42190
42191 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
42192- ttm_bo_man_init,
42193- ttm_bo_man_takedown,
42194- ttm_bo_man_get_node,
42195- ttm_bo_man_put_node,
42196- ttm_bo_man_debug
42197+ .init = ttm_bo_man_init,
42198+ .takedown = ttm_bo_man_takedown,
42199+ .get_node = ttm_bo_man_get_node,
42200+ .put_node = ttm_bo_man_put_node,
42201+ .debug = ttm_bo_man_debug
42202 };
42203 EXPORT_SYMBOL(ttm_bo_manager_func);
42204diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
42205index a1803fb..c53f6b0 100644
42206--- a/drivers/gpu/drm/ttm/ttm_memory.c
42207+++ b/drivers/gpu/drm/ttm/ttm_memory.c
42208@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
42209 zone->glob = glob;
42210 glob->zone_kernel = zone;
42211 ret = kobject_init_and_add(
42212- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
42213+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
42214 if (unlikely(ret != 0)) {
42215 kobject_put(&zone->kobj);
42216 return ret;
42217@@ -348,7 +348,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
42218 zone->glob = glob;
42219 glob->zone_dma32 = zone;
42220 ret = kobject_init_and_add(
42221- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
42222+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
42223 if (unlikely(ret != 0)) {
42224 kobject_put(&zone->kobj);
42225 return ret;
42226diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
42227index 025c429..314062f 100644
42228--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
42229+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
42230@@ -54,7 +54,7 @@
42231
42232 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
42233 #define SMALL_ALLOCATION 16
42234-#define FREE_ALL_PAGES (~0U)
42235+#define FREE_ALL_PAGES (~0UL)
42236 /* times are in msecs */
42237 #define PAGE_FREE_INTERVAL 1000
42238
42239@@ -299,15 +299,14 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
42240 * @free_all: If set to true will free all pages in pool
42241 * @use_static: Safe to use static buffer
42242 **/
42243-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
42244+static unsigned long ttm_page_pool_free(struct ttm_page_pool *pool, unsigned long nr_free,
42245 bool use_static)
42246 {
42247 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
42248 unsigned long irq_flags;
42249 struct page *p;
42250 struct page **pages_to_free;
42251- unsigned freed_pages = 0,
42252- npages_to_free = nr_free;
42253+ unsigned long freed_pages = 0, npages_to_free = nr_free;
42254
42255 if (NUM_PAGES_TO_ALLOC < nr_free)
42256 npages_to_free = NUM_PAGES_TO_ALLOC;
42257@@ -371,7 +370,8 @@ restart:
42258 __list_del(&p->lru, &pool->list);
42259
42260 ttm_pool_update_free_locked(pool, freed_pages);
42261- nr_free -= freed_pages;
42262+ if (likely(nr_free != FREE_ALL_PAGES))
42263+ nr_free -= freed_pages;
42264 }
42265
42266 spin_unlock_irqrestore(&pool->lock, irq_flags);
42267@@ -399,7 +399,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42268 unsigned i;
42269 unsigned pool_offset;
42270 struct ttm_page_pool *pool;
42271- int shrink_pages = sc->nr_to_scan;
42272+ unsigned long shrink_pages = sc->nr_to_scan;
42273 unsigned long freed = 0;
42274
42275 if (!mutex_trylock(&lock))
42276@@ -407,7 +407,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42277 pool_offset = ++start_pool % NUM_POOLS;
42278 /* select start pool in round robin fashion */
42279 for (i = 0; i < NUM_POOLS; ++i) {
42280- unsigned nr_free = shrink_pages;
42281+ unsigned long nr_free = shrink_pages;
42282 if (shrink_pages == 0)
42283 break;
42284 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
42285@@ -673,7 +673,7 @@ out:
42286 }
42287
42288 /* Put all pages in pages list to correct pool to wait for reuse */
42289-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
42290+static void ttm_put_pages(struct page **pages, unsigned long npages, int flags,
42291 enum ttm_caching_state cstate)
42292 {
42293 unsigned long irq_flags;
42294@@ -728,7 +728,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
42295 struct list_head plist;
42296 struct page *p = NULL;
42297 gfp_t gfp_flags = GFP_USER;
42298- unsigned count;
42299+ unsigned long count;
42300 int r;
42301
42302 /* set zero flag for page allocation if required */
42303diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
42304index 01e1d27..aaa018a 100644
42305--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
42306+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
42307@@ -56,7 +56,7 @@
42308
42309 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
42310 #define SMALL_ALLOCATION 4
42311-#define FREE_ALL_PAGES (~0U)
42312+#define FREE_ALL_PAGES (~0UL)
42313 /* times are in msecs */
42314 #define IS_UNDEFINED (0)
42315 #define IS_WC (1<<1)
42316@@ -413,7 +413,7 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
42317 * @nr_free: If set to true will free all pages in pool
42318 * @use_static: Safe to use static buffer
42319 **/
42320-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
42321+static unsigned long ttm_dma_page_pool_free(struct dma_pool *pool, unsigned long nr_free,
42322 bool use_static)
42323 {
42324 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
42325@@ -421,8 +421,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
42326 struct dma_page *dma_p, *tmp;
42327 struct page **pages_to_free;
42328 struct list_head d_pages;
42329- unsigned freed_pages = 0,
42330- npages_to_free = nr_free;
42331+ unsigned long freed_pages = 0, npages_to_free = nr_free;
42332
42333 if (NUM_PAGES_TO_ALLOC < nr_free)
42334 npages_to_free = NUM_PAGES_TO_ALLOC;
42335@@ -499,7 +498,8 @@ restart:
42336 /* remove range of pages from the pool */
42337 if (freed_pages) {
42338 ttm_pool_update_free_locked(pool, freed_pages);
42339- nr_free -= freed_pages;
42340+ if (likely(nr_free != FREE_ALL_PAGES))
42341+ nr_free -= freed_pages;
42342 }
42343
42344 spin_unlock_irqrestore(&pool->lock, irq_flags);
42345@@ -936,7 +936,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
42346 struct dma_page *d_page, *next;
42347 enum pool_type type;
42348 bool is_cached = false;
42349- unsigned count = 0, i, npages = 0;
42350+ unsigned long count = 0, i, npages = 0;
42351 unsigned long irq_flags;
42352
42353 type = ttm_to_type(ttm->page_flags, ttm->caching_state);
42354@@ -1012,7 +1012,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42355 static unsigned start_pool;
42356 unsigned idx = 0;
42357 unsigned pool_offset;
42358- unsigned shrink_pages = sc->nr_to_scan;
42359+ unsigned long shrink_pages = sc->nr_to_scan;
42360 struct device_pools *p;
42361 unsigned long freed = 0;
42362
42363@@ -1025,7 +1025,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42364 goto out;
42365 pool_offset = ++start_pool % _manager->npools;
42366 list_for_each_entry(p, &_manager->pools, pools) {
42367- unsigned nr_free;
42368+ unsigned long nr_free;
42369
42370 if (!p->dev)
42371 continue;
42372@@ -1039,7 +1039,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
42373 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
42374 freed += nr_free - shrink_pages;
42375
42376- pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
42377+ pr_debug("%s: (%s:%d) Asked to shrink %lu, have %lu more to go\n",
42378 p->pool->dev_name, p->pool->name, current->pid,
42379 nr_free, shrink_pages);
42380 }
42381diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
42382index 5fc16ce..1bd84ec 100644
42383--- a/drivers/gpu/drm/udl/udl_fb.c
42384+++ b/drivers/gpu/drm/udl/udl_fb.c
42385@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
42386 fb_deferred_io_cleanup(info);
42387 kfree(info->fbdefio);
42388 info->fbdefio = NULL;
42389- info->fbops->fb_mmap = udl_fb_mmap;
42390 }
42391
42392 pr_warn("released /dev/fb%d user=%d count=%d\n",
42393diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
42394index ef8c500..01030c8 100644
42395--- a/drivers/gpu/drm/via/via_drv.h
42396+++ b/drivers/gpu/drm/via/via_drv.h
42397@@ -53,7 +53,7 @@ typedef struct drm_via_ring_buffer {
42398 typedef uint32_t maskarray_t[5];
42399
42400 typedef struct drm_via_irq {
42401- atomic_t irq_received;
42402+ atomic_unchecked_t irq_received;
42403 uint32_t pending_mask;
42404 uint32_t enable_mask;
42405 wait_queue_head_t irq_queue;
42406@@ -77,7 +77,7 @@ typedef struct drm_via_private {
42407 struct timeval last_vblank;
42408 int last_vblank_valid;
42409 unsigned usec_per_vblank;
42410- atomic_t vbl_received;
42411+ atomic_unchecked_t vbl_received;
42412 drm_via_state_t hc_state;
42413 char pci_buf[VIA_PCI_BUF_SIZE];
42414 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
42415diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
42416index 1319433..a993b0c 100644
42417--- a/drivers/gpu/drm/via/via_irq.c
42418+++ b/drivers/gpu/drm/via/via_irq.c
42419@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
42420 if (crtc != 0)
42421 return 0;
42422
42423- return atomic_read(&dev_priv->vbl_received);
42424+ return atomic_read_unchecked(&dev_priv->vbl_received);
42425 }
42426
42427 irqreturn_t via_driver_irq_handler(int irq, void *arg)
42428@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42429
42430 status = VIA_READ(VIA_REG_INTERRUPT);
42431 if (status & VIA_IRQ_VBLANK_PENDING) {
42432- atomic_inc(&dev_priv->vbl_received);
42433- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
42434+ atomic_inc_unchecked(&dev_priv->vbl_received);
42435+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
42436 do_gettimeofday(&cur_vblank);
42437 if (dev_priv->last_vblank_valid) {
42438 dev_priv->usec_per_vblank =
42439@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42440 dev_priv->last_vblank = cur_vblank;
42441 dev_priv->last_vblank_valid = 1;
42442 }
42443- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
42444+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
42445 DRM_DEBUG("US per vblank is: %u\n",
42446 dev_priv->usec_per_vblank);
42447 }
42448@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42449
42450 for (i = 0; i < dev_priv->num_irqs; ++i) {
42451 if (status & cur_irq->pending_mask) {
42452- atomic_inc(&cur_irq->irq_received);
42453+ atomic_inc_unchecked(&cur_irq->irq_received);
42454 wake_up(&cur_irq->irq_queue);
42455 handled = 1;
42456 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
42457@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
42458 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
42459 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
42460 masks[irq][4]));
42461- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
42462+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
42463 } else {
42464 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
42465 (((cur_irq_sequence =
42466- atomic_read(&cur_irq->irq_received)) -
42467+ atomic_read_unchecked(&cur_irq->irq_received)) -
42468 *sequence) <= (1 << 23)));
42469 }
42470 *sequence = cur_irq_sequence;
42471@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
42472 }
42473
42474 for (i = 0; i < dev_priv->num_irqs; ++i) {
42475- atomic_set(&cur_irq->irq_received, 0);
42476+ atomic_set_unchecked(&cur_irq->irq_received, 0);
42477 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
42478 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
42479 init_waitqueue_head(&cur_irq->irq_queue);
42480@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
42481 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
42482 case VIA_IRQ_RELATIVE:
42483 irqwait->request.sequence +=
42484- atomic_read(&cur_irq->irq_received);
42485+ atomic_read_unchecked(&cur_irq->irq_received);
42486 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
42487 case VIA_IRQ_ABSOLUTE:
42488 break;
42489diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42490index d26a6da..5fa41ed 100644
42491--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42492+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42493@@ -447,7 +447,7 @@ struct vmw_private {
42494 * Fencing and IRQs.
42495 */
42496
42497- atomic_t marker_seq;
42498+ atomic_unchecked_t marker_seq;
42499 wait_queue_head_t fence_queue;
42500 wait_queue_head_t fifo_queue;
42501 spinlock_t waiter_lock;
42502diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42503index 39f2b03..d1b0a64 100644
42504--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42505+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42506@@ -152,7 +152,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
42507 (unsigned int) min,
42508 (unsigned int) fifo->capabilities);
42509
42510- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
42511+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
42512 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
42513 vmw_marker_queue_init(&fifo->marker_queue);
42514 return vmw_fifo_send_fence(dev_priv, &dummy);
42515@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
42516 if (reserveable)
42517 iowrite32(bytes, fifo_mem +
42518 SVGA_FIFO_RESERVED);
42519- return fifo_mem + (next_cmd >> 2);
42520+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
42521 } else {
42522 need_bounce = true;
42523 }
42524@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
42525
42526 fm = vmw_fifo_reserve(dev_priv, bytes);
42527 if (unlikely(fm == NULL)) {
42528- *seqno = atomic_read(&dev_priv->marker_seq);
42529+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
42530 ret = -ENOMEM;
42531 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
42532 false, 3*HZ);
42533@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
42534 }
42535
42536 do {
42537- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
42538+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
42539 } while (*seqno == 0);
42540
42541 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
42542diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42543index 170b61b..fec7348 100644
42544--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42545+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42546@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
42547 }
42548
42549 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
42550- vmw_gmrid_man_init,
42551- vmw_gmrid_man_takedown,
42552- vmw_gmrid_man_get_node,
42553- vmw_gmrid_man_put_node,
42554- vmw_gmrid_man_debug
42555+ .init = vmw_gmrid_man_init,
42556+ .takedown = vmw_gmrid_man_takedown,
42557+ .get_node = vmw_gmrid_man_get_node,
42558+ .put_node = vmw_gmrid_man_put_node,
42559+ .debug = vmw_gmrid_man_debug
42560 };
42561diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42562index 69c8ce2..cacb0ab 100644
42563--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42564+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42565@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
42566 int ret;
42567
42568 num_clips = arg->num_clips;
42569- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
42570+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
42571
42572 if (unlikely(num_clips == 0))
42573 return 0;
42574@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
42575 int ret;
42576
42577 num_clips = arg->num_clips;
42578- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
42579+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
42580
42581 if (unlikely(num_clips == 0))
42582 return 0;
42583diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42584index 9fe9827..0aa2fc0 100644
42585--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42586+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42587@@ -102,7 +102,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
42588 * emitted. Then the fence is stale and signaled.
42589 */
42590
42591- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
42592+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
42593 > VMW_FENCE_WRAP);
42594
42595 return ret;
42596@@ -133,7 +133,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
42597
42598 if (fifo_idle)
42599 down_read(&fifo_state->rwsem);
42600- signal_seq = atomic_read(&dev_priv->marker_seq);
42601+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
42602 ret = 0;
42603
42604 for (;;) {
42605diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42606index efd1ffd..0ae13ca 100644
42607--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42608+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42609@@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
42610 while (!vmw_lag_lt(queue, us)) {
42611 spin_lock(&queue->lock);
42612 if (list_empty(&queue->head))
42613- seqno = atomic_read(&dev_priv->marker_seq);
42614+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
42615 else {
42616 marker = list_first_entry(&queue->head,
42617 struct vmw_marker, head);
42618diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
42619index 37ac7b5..d52a5c9 100644
42620--- a/drivers/gpu/vga/vga_switcheroo.c
42621+++ b/drivers/gpu/vga/vga_switcheroo.c
42622@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
42623
42624 /* this version is for the case where the power switch is separate
42625 to the device being powered down. */
42626-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
42627+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
42628 {
42629 /* copy over all the bus versions */
42630 if (dev->bus && dev->bus->pm) {
42631@@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
42632 return ret;
42633 }
42634
42635-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
42636+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
42637 {
42638 /* copy over all the bus versions */
42639 if (dev->bus && dev->bus->pm) {
42640diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
42641index 56ce8c2..32ce524 100644
42642--- a/drivers/hid/hid-core.c
42643+++ b/drivers/hid/hid-core.c
42644@@ -2531,7 +2531,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
42645
42646 int hid_add_device(struct hid_device *hdev)
42647 {
42648- static atomic_t id = ATOMIC_INIT(0);
42649+ static atomic_unchecked_t id = ATOMIC_INIT(0);
42650 int ret;
42651
42652 if (WARN_ON(hdev->status & HID_STAT_ADDED))
42653@@ -2574,7 +2574,7 @@ int hid_add_device(struct hid_device *hdev)
42654 /* XXX hack, any other cleaner solution after the driver core
42655 * is converted to allow more than 20 bytes as the device name? */
42656 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
42657- hdev->vendor, hdev->product, atomic_inc_return(&id));
42658+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
42659
42660 hid_debug_register(hdev, dev_name(&hdev->dev));
42661 ret = device_add(&hdev->dev);
42662diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
42663index c13fb5b..55a3802 100644
42664--- a/drivers/hid/hid-wiimote-debug.c
42665+++ b/drivers/hid/hid-wiimote-debug.c
42666@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
42667 else if (size == 0)
42668 return -EIO;
42669
42670- if (copy_to_user(u, buf, size))
42671+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
42672 return -EFAULT;
42673
42674 *off += size;
42675diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
42676index 00bc30e..d8e5097 100644
42677--- a/drivers/hv/channel.c
42678+++ b/drivers/hv/channel.c
42679@@ -370,7 +370,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
42680 int ret = 0;
42681
42682 next_gpadl_handle =
42683- (atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
42684+ (atomic_inc_return_unchecked(&vmbus_connection.next_gpadl_handle) - 1);
42685
42686 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
42687 if (ret)
42688diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
42689index 50e51a5..b0bfd78 100644
42690--- a/drivers/hv/hv.c
42691+++ b/drivers/hv/hv.c
42692@@ -118,7 +118,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
42693 u64 output_address = (output) ? virt_to_phys(output) : 0;
42694 u32 output_address_hi = output_address >> 32;
42695 u32 output_address_lo = output_address & 0xFFFFFFFF;
42696- void *hypercall_page = hv_context.hypercall_page;
42697+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
42698
42699 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
42700 "=a"(hv_status_lo) : "d" (control_hi),
42701@@ -164,7 +164,7 @@ int hv_init(void)
42702 /* See if the hypercall page is already set */
42703 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
42704
42705- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
42706+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
42707
42708 if (!virtaddr)
42709 goto cleanup;
42710diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
42711index ff16938..e60879c 100644
42712--- a/drivers/hv/hv_balloon.c
42713+++ b/drivers/hv/hv_balloon.c
42714@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
42715
42716 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
42717 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
42718-static atomic_t trans_id = ATOMIC_INIT(0);
42719+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
42720
42721 static int dm_ring_size = (5 * PAGE_SIZE);
42722
42723@@ -947,7 +947,7 @@ static void hot_add_req(struct work_struct *dummy)
42724 pr_info("Memory hot add failed\n");
42725
42726 dm->state = DM_INITIALIZED;
42727- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42728+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42729 vmbus_sendpacket(dm->dev->channel, &resp,
42730 sizeof(struct dm_hot_add_response),
42731 (unsigned long)NULL,
42732@@ -1028,7 +1028,7 @@ static void post_status(struct hv_dynmem_device *dm)
42733 memset(&status, 0, sizeof(struct dm_status));
42734 status.hdr.type = DM_STATUS_REPORT;
42735 status.hdr.size = sizeof(struct dm_status);
42736- status.hdr.trans_id = atomic_inc_return(&trans_id);
42737+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42738
42739 /*
42740 * The host expects the guest to report free memory.
42741@@ -1048,7 +1048,7 @@ static void post_status(struct hv_dynmem_device *dm)
42742 * send the status. This can happen if we were interrupted
42743 * after we picked our transaction ID.
42744 */
42745- if (status.hdr.trans_id != atomic_read(&trans_id))
42746+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
42747 return;
42748
42749 /*
42750@@ -1188,7 +1188,7 @@ static void balloon_up(struct work_struct *dummy)
42751 */
42752
42753 do {
42754- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
42755+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42756 ret = vmbus_sendpacket(dm_device.dev->channel,
42757 bl_resp,
42758 bl_resp->hdr.size,
42759@@ -1234,7 +1234,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
42760
42761 memset(&resp, 0, sizeof(struct dm_unballoon_response));
42762 resp.hdr.type = DM_UNBALLOON_RESPONSE;
42763- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42764+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42765 resp.hdr.size = sizeof(struct dm_unballoon_response);
42766
42767 vmbus_sendpacket(dm_device.dev->channel, &resp,
42768@@ -1295,7 +1295,7 @@ static void version_resp(struct hv_dynmem_device *dm,
42769 memset(&version_req, 0, sizeof(struct dm_version_request));
42770 version_req.hdr.type = DM_VERSION_REQUEST;
42771 version_req.hdr.size = sizeof(struct dm_version_request);
42772- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42773+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42774 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
42775 version_req.is_last_attempt = 1;
42776
42777@@ -1468,7 +1468,7 @@ static int balloon_probe(struct hv_device *dev,
42778 memset(&version_req, 0, sizeof(struct dm_version_request));
42779 version_req.hdr.type = DM_VERSION_REQUEST;
42780 version_req.hdr.size = sizeof(struct dm_version_request);
42781- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42782+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42783 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
42784 version_req.is_last_attempt = 0;
42785
42786@@ -1499,7 +1499,7 @@ static int balloon_probe(struct hv_device *dev,
42787 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
42788 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
42789 cap_msg.hdr.size = sizeof(struct dm_capabilities);
42790- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
42791+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42792
42793 cap_msg.caps.cap_bits.balloon = 1;
42794 cap_msg.caps.cap_bits.hot_add = 1;
42795diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
42796index 44b1c94..6dccc2c 100644
42797--- a/drivers/hv/hyperv_vmbus.h
42798+++ b/drivers/hv/hyperv_vmbus.h
42799@@ -632,7 +632,7 @@ enum vmbus_connect_state {
42800 struct vmbus_connection {
42801 enum vmbus_connect_state conn_state;
42802
42803- atomic_t next_gpadl_handle;
42804+ atomic_unchecked_t next_gpadl_handle;
42805
42806 /*
42807 * Represents channel interrupts. Each bit position represents a
42808diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
42809index f518b8d7..4bc0b64 100644
42810--- a/drivers/hv/vmbus_drv.c
42811+++ b/drivers/hv/vmbus_drv.c
42812@@ -840,10 +840,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
42813 {
42814 int ret = 0;
42815
42816- static atomic_t device_num = ATOMIC_INIT(0);
42817+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
42818
42819 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
42820- atomic_inc_return(&device_num));
42821+ atomic_inc_return_unchecked(&device_num));
42822
42823 child_device_obj->device.bus = &hv_bus;
42824 child_device_obj->device.parent = &hv_acpi_dev->dev;
42825diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
42826index 579bdf9..0dac21d5 100644
42827--- a/drivers/hwmon/acpi_power_meter.c
42828+++ b/drivers/hwmon/acpi_power_meter.c
42829@@ -116,7 +116,7 @@ struct sensor_template {
42830 struct device_attribute *devattr,
42831 const char *buf, size_t count);
42832 int index;
42833-};
42834+} __do_const;
42835
42836 /* Averaging interval */
42837 static int update_avg_interval(struct acpi_power_meter_resource *resource)
42838@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
42839 struct sensor_template *attrs)
42840 {
42841 struct device *dev = &resource->acpi_dev->dev;
42842- struct sensor_device_attribute *sensors =
42843+ sensor_device_attribute_no_const *sensors =
42844 &resource->sensors[resource->num_sensors];
42845 int res = 0;
42846
42847@@ -973,7 +973,7 @@ static int __init enable_cap_knobs(const struct dmi_system_id *d)
42848 return 0;
42849 }
42850
42851-static struct dmi_system_id __initdata pm_dmi_table[] = {
42852+static const struct dmi_system_id __initconst pm_dmi_table[] = {
42853 {
42854 enable_cap_knobs, "IBM Active Energy Manager",
42855 {
42856diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
42857index 0af63da..05a183a 100644
42858--- a/drivers/hwmon/applesmc.c
42859+++ b/drivers/hwmon/applesmc.c
42860@@ -1105,7 +1105,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
42861 {
42862 struct applesmc_node_group *grp;
42863 struct applesmc_dev_attr *node;
42864- struct attribute *attr;
42865+ attribute_no_const *attr;
42866 int ret, i;
42867
42868 for (grp = groups; grp->format; grp++) {
42869diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
42870index cccef87..06ce8ec 100644
42871--- a/drivers/hwmon/asus_atk0110.c
42872+++ b/drivers/hwmon/asus_atk0110.c
42873@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
42874 struct atk_sensor_data {
42875 struct list_head list;
42876 struct atk_data *data;
42877- struct device_attribute label_attr;
42878- struct device_attribute input_attr;
42879- struct device_attribute limit1_attr;
42880- struct device_attribute limit2_attr;
42881+ device_attribute_no_const label_attr;
42882+ device_attribute_no_const input_attr;
42883+ device_attribute_no_const limit1_attr;
42884+ device_attribute_no_const limit2_attr;
42885 char label_attr_name[ATTR_NAME_SIZE];
42886 char input_attr_name[ATTR_NAME_SIZE];
42887 char limit1_attr_name[ATTR_NAME_SIZE];
42888@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
42889 static struct device_attribute atk_name_attr =
42890 __ATTR(name, 0444, atk_name_show, NULL);
42891
42892-static void atk_init_attribute(struct device_attribute *attr, char *name,
42893+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
42894 sysfs_show_func show)
42895 {
42896 sysfs_attr_init(&attr->attr);
42897diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
42898index 5b7fec8..05c957a 100644
42899--- a/drivers/hwmon/coretemp.c
42900+++ b/drivers/hwmon/coretemp.c
42901@@ -783,7 +783,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
42902 return NOTIFY_OK;
42903 }
42904
42905-static struct notifier_block coretemp_cpu_notifier __refdata = {
42906+static struct notifier_block coretemp_cpu_notifier = {
42907 .notifier_call = coretemp_cpu_callback,
42908 };
42909
42910diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
42911index 7a8a6fb..015c1fd 100644
42912--- a/drivers/hwmon/ibmaem.c
42913+++ b/drivers/hwmon/ibmaem.c
42914@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
42915 struct aem_rw_sensor_template *rw)
42916 {
42917 struct device *dev = &data->pdev->dev;
42918- struct sensor_device_attribute *sensors = data->sensors;
42919+ sensor_device_attribute_no_const *sensors = data->sensors;
42920 int err;
42921
42922 /* Set up read-only sensors */
42923diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
42924index 17ae2eb..21b71dd 100644
42925--- a/drivers/hwmon/iio_hwmon.c
42926+++ b/drivers/hwmon/iio_hwmon.c
42927@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
42928 {
42929 struct device *dev = &pdev->dev;
42930 struct iio_hwmon_state *st;
42931- struct sensor_device_attribute *a;
42932+ sensor_device_attribute_no_const *a;
42933 int ret, i;
42934 int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
42935 enum iio_chan_type type;
42936diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
42937index 37f0170..414ec2c 100644
42938--- a/drivers/hwmon/nct6683.c
42939+++ b/drivers/hwmon/nct6683.c
42940@@ -397,11 +397,11 @@ static struct attribute_group *
42941 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42942 int repeat)
42943 {
42944- struct sensor_device_attribute_2 *a2;
42945- struct sensor_device_attribute *a;
42946+ sensor_device_attribute_2_no_const *a2;
42947+ sensor_device_attribute_no_const *a;
42948 struct sensor_device_template **t;
42949 struct sensor_device_attr_u *su;
42950- struct attribute_group *group;
42951+ attribute_group_no_const *group;
42952 struct attribute **attrs;
42953 int i, j, count;
42954
42955diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
42956index 0773930..6f04305 100644
42957--- a/drivers/hwmon/nct6775.c
42958+++ b/drivers/hwmon/nct6775.c
42959@@ -952,10 +952,10 @@ static struct attribute_group *
42960 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42961 int repeat)
42962 {
42963- struct attribute_group *group;
42964+ attribute_group_no_const *group;
42965 struct sensor_device_attr_u *su;
42966- struct sensor_device_attribute *a;
42967- struct sensor_device_attribute_2 *a2;
42968+ sensor_device_attribute_no_const *a;
42969+ sensor_device_attribute_2_no_const *a2;
42970 struct attribute **attrs;
42971 struct sensor_device_template **t;
42972 int i, count;
42973diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
42974index f2e47c7..45d7941 100644
42975--- a/drivers/hwmon/pmbus/pmbus_core.c
42976+++ b/drivers/hwmon/pmbus/pmbus_core.c
42977@@ -816,7 +816,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
42978 return 0;
42979 }
42980
42981-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42982+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
42983 const char *name,
42984 umode_t mode,
42985 ssize_t (*show)(struct device *dev,
42986@@ -833,7 +833,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42987 dev_attr->store = store;
42988 }
42989
42990-static void pmbus_attr_init(struct sensor_device_attribute *a,
42991+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
42992 const char *name,
42993 umode_t mode,
42994 ssize_t (*show)(struct device *dev,
42995@@ -855,7 +855,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
42996 u16 reg, u8 mask)
42997 {
42998 struct pmbus_boolean *boolean;
42999- struct sensor_device_attribute *a;
43000+ sensor_device_attribute_no_const *a;
43001
43002 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
43003 if (!boolean)
43004@@ -880,7 +880,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
43005 bool update, bool readonly)
43006 {
43007 struct pmbus_sensor *sensor;
43008- struct device_attribute *a;
43009+ device_attribute_no_const *a;
43010
43011 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
43012 if (!sensor)
43013@@ -911,7 +911,7 @@ static int pmbus_add_label(struct pmbus_data *data,
43014 const char *lstring, int index)
43015 {
43016 struct pmbus_label *label;
43017- struct device_attribute *a;
43018+ device_attribute_no_const *a;
43019
43020 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
43021 if (!label)
43022diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
43023index d4f0935..7420593 100644
43024--- a/drivers/hwmon/sht15.c
43025+++ b/drivers/hwmon/sht15.c
43026@@ -169,7 +169,7 @@ struct sht15_data {
43027 int supply_uv;
43028 bool supply_uv_valid;
43029 struct work_struct update_supply_work;
43030- atomic_t interrupt_handled;
43031+ atomic_unchecked_t interrupt_handled;
43032 };
43033
43034 /**
43035@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
43036 ret = gpio_direction_input(data->pdata->gpio_data);
43037 if (ret)
43038 return ret;
43039- atomic_set(&data->interrupt_handled, 0);
43040+ atomic_set_unchecked(&data->interrupt_handled, 0);
43041
43042 enable_irq(gpio_to_irq(data->pdata->gpio_data));
43043 if (gpio_get_value(data->pdata->gpio_data) == 0) {
43044 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
43045 /* Only relevant if the interrupt hasn't occurred. */
43046- if (!atomic_read(&data->interrupt_handled))
43047+ if (!atomic_read_unchecked(&data->interrupt_handled))
43048 schedule_work(&data->read_work);
43049 }
43050 ret = wait_event_timeout(data->wait_queue,
43051@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
43052
43053 /* First disable the interrupt */
43054 disable_irq_nosync(irq);
43055- atomic_inc(&data->interrupt_handled);
43056+ atomic_inc_unchecked(&data->interrupt_handled);
43057 /* Then schedule a reading work struct */
43058 if (data->state != SHT15_READING_NOTHING)
43059 schedule_work(&data->read_work);
43060@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
43061 * If not, then start the interrupt again - care here as could
43062 * have gone low in meantime so verify it hasn't!
43063 */
43064- atomic_set(&data->interrupt_handled, 0);
43065+ atomic_set_unchecked(&data->interrupt_handled, 0);
43066 enable_irq(gpio_to_irq(data->pdata->gpio_data));
43067 /* If still not occurred or another handler was scheduled */
43068 if (gpio_get_value(data->pdata->gpio_data)
43069- || atomic_read(&data->interrupt_handled))
43070+ || atomic_read_unchecked(&data->interrupt_handled))
43071 return;
43072 }
43073
43074diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
43075index ac91c07..8e69663 100644
43076--- a/drivers/hwmon/via-cputemp.c
43077+++ b/drivers/hwmon/via-cputemp.c
43078@@ -295,7 +295,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
43079 return NOTIFY_OK;
43080 }
43081
43082-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
43083+static struct notifier_block via_cputemp_cpu_notifier = {
43084 .notifier_call = via_cputemp_cpu_callback,
43085 };
43086
43087diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
43088index 65e3240..e6c511d 100644
43089--- a/drivers/i2c/busses/i2c-amd756-s4882.c
43090+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
43091@@ -39,7 +39,7 @@
43092 extern struct i2c_adapter amd756_smbus;
43093
43094 static struct i2c_adapter *s4882_adapter;
43095-static struct i2c_algorithm *s4882_algo;
43096+static i2c_algorithm_no_const *s4882_algo;
43097
43098 /* Wrapper access functions for multiplexed SMBus */
43099 static DEFINE_MUTEX(amd756_lock);
43100diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
43101index b19a310..d6eece0 100644
43102--- a/drivers/i2c/busses/i2c-diolan-u2c.c
43103+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
43104@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
43105 /* usb layer */
43106
43107 /* Send command to device, and get response. */
43108-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
43109+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
43110 {
43111 int ret = 0;
43112 int actual;
43113diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
43114index 88eda09..cf40434 100644
43115--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
43116+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
43117@@ -37,7 +37,7 @@
43118 extern struct i2c_adapter *nforce2_smbus;
43119
43120 static struct i2c_adapter *s4985_adapter;
43121-static struct i2c_algorithm *s4985_algo;
43122+static i2c_algorithm_no_const *s4985_algo;
43123
43124 /* Wrapper access functions for multiplexed SMBus */
43125 static DEFINE_MUTEX(nforce2_lock);
43126diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
43127index 71c7a39..71dd3e0 100644
43128--- a/drivers/i2c/i2c-dev.c
43129+++ b/drivers/i2c/i2c-dev.c
43130@@ -272,7 +272,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
43131 break;
43132 }
43133
43134- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
43135+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
43136 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
43137 if (IS_ERR(rdwr_pa[i].buf)) {
43138 res = PTR_ERR(rdwr_pa[i].buf);
43139diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
43140index 0b510ba..4fbb5085 100644
43141--- a/drivers/ide/ide-cd.c
43142+++ b/drivers/ide/ide-cd.c
43143@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
43144 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
43145 if ((unsigned long)buf & alignment
43146 || blk_rq_bytes(rq) & q->dma_pad_mask
43147- || object_is_on_stack(buf))
43148+ || object_starts_on_stack(buf))
43149 drive->dma = 0;
43150 }
43151 }
43152diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
43153index 4df97f6..c751151 100644
43154--- a/drivers/iio/industrialio-core.c
43155+++ b/drivers/iio/industrialio-core.c
43156@@ -570,7 +570,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
43157 }
43158
43159 static
43160-int __iio_device_attr_init(struct device_attribute *dev_attr,
43161+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
43162 const char *postfix,
43163 struct iio_chan_spec const *chan,
43164 ssize_t (*readfunc)(struct device *dev,
43165diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
43166index e28a494..f7c2671 100644
43167--- a/drivers/infiniband/core/cm.c
43168+++ b/drivers/infiniband/core/cm.c
43169@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
43170
43171 struct cm_counter_group {
43172 struct kobject obj;
43173- atomic_long_t counter[CM_ATTR_COUNT];
43174+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
43175 };
43176
43177 struct cm_counter_attribute {
43178@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
43179 struct ib_mad_send_buf *msg = NULL;
43180 int ret;
43181
43182- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43183+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43184 counter[CM_REQ_COUNTER]);
43185
43186 /* Quick state check to discard duplicate REQs. */
43187@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
43188 if (!cm_id_priv)
43189 return;
43190
43191- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43192+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43193 counter[CM_REP_COUNTER]);
43194 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
43195 if (ret)
43196@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
43197 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
43198 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
43199 spin_unlock_irq(&cm_id_priv->lock);
43200- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43201+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43202 counter[CM_RTU_COUNTER]);
43203 goto out;
43204 }
43205@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
43206 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
43207 dreq_msg->local_comm_id);
43208 if (!cm_id_priv) {
43209- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43210+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43211 counter[CM_DREQ_COUNTER]);
43212 cm_issue_drep(work->port, work->mad_recv_wc);
43213 return -EINVAL;
43214@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
43215 case IB_CM_MRA_REP_RCVD:
43216 break;
43217 case IB_CM_TIMEWAIT:
43218- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43219+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43220 counter[CM_DREQ_COUNTER]);
43221 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
43222 goto unlock;
43223@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
43224 cm_free_msg(msg);
43225 goto deref;
43226 case IB_CM_DREQ_RCVD:
43227- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43228+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43229 counter[CM_DREQ_COUNTER]);
43230 goto unlock;
43231 default:
43232@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
43233 ib_modify_mad(cm_id_priv->av.port->mad_agent,
43234 cm_id_priv->msg, timeout)) {
43235 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
43236- atomic_long_inc(&work->port->
43237+ atomic_long_inc_unchecked(&work->port->
43238 counter_group[CM_RECV_DUPLICATES].
43239 counter[CM_MRA_COUNTER]);
43240 goto out;
43241@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
43242 break;
43243 case IB_CM_MRA_REQ_RCVD:
43244 case IB_CM_MRA_REP_RCVD:
43245- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43246+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43247 counter[CM_MRA_COUNTER]);
43248 /* fall through */
43249 default:
43250@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
43251 case IB_CM_LAP_IDLE:
43252 break;
43253 case IB_CM_MRA_LAP_SENT:
43254- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43255+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43256 counter[CM_LAP_COUNTER]);
43257 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
43258 goto unlock;
43259@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
43260 cm_free_msg(msg);
43261 goto deref;
43262 case IB_CM_LAP_RCVD:
43263- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43264+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43265 counter[CM_LAP_COUNTER]);
43266 goto unlock;
43267 default:
43268@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
43269 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
43270 if (cur_cm_id_priv) {
43271 spin_unlock_irq(&cm.lock);
43272- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43273+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43274 counter[CM_SIDR_REQ_COUNTER]);
43275 goto out; /* Duplicate message. */
43276 }
43277@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
43278 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
43279 msg->retries = 1;
43280
43281- atomic_long_add(1 + msg->retries,
43282+ atomic_long_add_unchecked(1 + msg->retries,
43283 &port->counter_group[CM_XMIT].counter[attr_index]);
43284 if (msg->retries)
43285- atomic_long_add(msg->retries,
43286+ atomic_long_add_unchecked(msg->retries,
43287 &port->counter_group[CM_XMIT_RETRIES].
43288 counter[attr_index]);
43289
43290@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
43291 }
43292
43293 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
43294- atomic_long_inc(&port->counter_group[CM_RECV].
43295+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
43296 counter[attr_id - CM_ATTR_ID_OFFSET]);
43297
43298 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
43299@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
43300 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
43301
43302 return sprintf(buf, "%ld\n",
43303- atomic_long_read(&group->counter[cm_attr->index]));
43304+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
43305 }
43306
43307 static const struct sysfs_ops cm_counter_ops = {
43308diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
43309index 9f5ad7c..588cd84 100644
43310--- a/drivers/infiniband/core/fmr_pool.c
43311+++ b/drivers/infiniband/core/fmr_pool.c
43312@@ -98,8 +98,8 @@ struct ib_fmr_pool {
43313
43314 struct task_struct *thread;
43315
43316- atomic_t req_ser;
43317- atomic_t flush_ser;
43318+ atomic_unchecked_t req_ser;
43319+ atomic_unchecked_t flush_ser;
43320
43321 wait_queue_head_t force_wait;
43322 };
43323@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
43324 struct ib_fmr_pool *pool = pool_ptr;
43325
43326 do {
43327- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
43328+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
43329 ib_fmr_batch_release(pool);
43330
43331- atomic_inc(&pool->flush_ser);
43332+ atomic_inc_unchecked(&pool->flush_ser);
43333 wake_up_interruptible(&pool->force_wait);
43334
43335 if (pool->flush_function)
43336@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
43337 }
43338
43339 set_current_state(TASK_INTERRUPTIBLE);
43340- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
43341+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
43342 !kthread_should_stop())
43343 schedule();
43344 __set_current_state(TASK_RUNNING);
43345@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
43346 pool->dirty_watermark = params->dirty_watermark;
43347 pool->dirty_len = 0;
43348 spin_lock_init(&pool->pool_lock);
43349- atomic_set(&pool->req_ser, 0);
43350- atomic_set(&pool->flush_ser, 0);
43351+ atomic_set_unchecked(&pool->req_ser, 0);
43352+ atomic_set_unchecked(&pool->flush_ser, 0);
43353 init_waitqueue_head(&pool->force_wait);
43354
43355 pool->thread = kthread_run(ib_fmr_cleanup_thread,
43356@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
43357 }
43358 spin_unlock_irq(&pool->pool_lock);
43359
43360- serial = atomic_inc_return(&pool->req_ser);
43361+ serial = atomic_inc_return_unchecked(&pool->req_ser);
43362 wake_up_process(pool->thread);
43363
43364 if (wait_event_interruptible(pool->force_wait,
43365- atomic_read(&pool->flush_ser) - serial >= 0))
43366+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
43367 return -EINTR;
43368
43369 return 0;
43370@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
43371 } else {
43372 list_add_tail(&fmr->list, &pool->dirty_list);
43373 if (++pool->dirty_len >= pool->dirty_watermark) {
43374- atomic_inc(&pool->req_ser);
43375+ atomic_inc_unchecked(&pool->req_ser);
43376 wake_up_process(pool->thread);
43377 }
43378 }
43379diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
43380index a9f0489..27a161b 100644
43381--- a/drivers/infiniband/core/uverbs_cmd.c
43382+++ b/drivers/infiniband/core/uverbs_cmd.c
43383@@ -951,6 +951,9 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
43384 if (copy_from_user(&cmd, buf, sizeof cmd))
43385 return -EFAULT;
43386
43387+ if (!access_ok_noprefault(VERIFY_READ, cmd.start, cmd.length))
43388+ return -EFAULT;
43389+
43390 INIT_UDATA(&udata, buf + sizeof cmd,
43391 (unsigned long) cmd.response + sizeof resp,
43392 in_len - sizeof cmd, out_len - sizeof resp);
43393diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
43394index 6791fd1..78bdcdf 100644
43395--- a/drivers/infiniband/hw/cxgb4/mem.c
43396+++ b/drivers/infiniband/hw/cxgb4/mem.c
43397@@ -256,7 +256,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
43398 int err;
43399 struct fw_ri_tpte tpt;
43400 u32 stag_idx;
43401- static atomic_t key;
43402+ static atomic_unchecked_t key;
43403
43404 if (c4iw_fatal_error(rdev))
43405 return -EIO;
43406@@ -277,7 +277,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
43407 if (rdev->stats.stag.cur > rdev->stats.stag.max)
43408 rdev->stats.stag.max = rdev->stats.stag.cur;
43409 mutex_unlock(&rdev->stats.lock);
43410- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
43411+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
43412 }
43413 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
43414 __func__, stag_state, type, pdid, stag_idx);
43415diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
43416index 79b3dbc..96e5fcc 100644
43417--- a/drivers/infiniband/hw/ipath/ipath_rc.c
43418+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
43419@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
43420 struct ib_atomic_eth *ateth;
43421 struct ipath_ack_entry *e;
43422 u64 vaddr;
43423- atomic64_t *maddr;
43424+ atomic64_unchecked_t *maddr;
43425 u64 sdata;
43426 u32 rkey;
43427 u8 next;
43428@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
43429 IB_ACCESS_REMOTE_ATOMIC)))
43430 goto nack_acc_unlck;
43431 /* Perform atomic OP and save result. */
43432- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
43433+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
43434 sdata = be64_to_cpu(ateth->swap_data);
43435 e = &qp->s_ack_queue[qp->r_head_ack_queue];
43436 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
43437- (u64) atomic64_add_return(sdata, maddr) - sdata :
43438+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
43439 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
43440 be64_to_cpu(ateth->compare_data),
43441 sdata);
43442diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
43443index 1f95bba..9530f87 100644
43444--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
43445+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
43446@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
43447 unsigned long flags;
43448 struct ib_wc wc;
43449 u64 sdata;
43450- atomic64_t *maddr;
43451+ atomic64_unchecked_t *maddr;
43452 enum ib_wc_status send_status;
43453
43454 /*
43455@@ -382,11 +382,11 @@ again:
43456 IB_ACCESS_REMOTE_ATOMIC)))
43457 goto acc_err;
43458 /* Perform atomic OP and save result. */
43459- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
43460+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
43461 sdata = wqe->wr.wr.atomic.compare_add;
43462 *(u64 *) sqp->s_sge.sge.vaddr =
43463 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
43464- (u64) atomic64_add_return(sdata, maddr) - sdata :
43465+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
43466 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
43467 sdata, wqe->wr.wr.atomic.swap);
43468 goto send_comp;
43469diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
43470index 5904026..f1c30e5 100644
43471--- a/drivers/infiniband/hw/mlx4/mad.c
43472+++ b/drivers/infiniband/hw/mlx4/mad.c
43473@@ -106,7 +106,7 @@ __be64 mlx4_ib_gen_node_guid(void)
43474
43475 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
43476 {
43477- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
43478+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
43479 cpu_to_be64(0xff00000000000000LL);
43480 }
43481
43482diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
43483index ed327e6..ca1739e0 100644
43484--- a/drivers/infiniband/hw/mlx4/mcg.c
43485+++ b/drivers/infiniband/hw/mlx4/mcg.c
43486@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
43487 {
43488 char name[20];
43489
43490- atomic_set(&ctx->tid, 0);
43491+ atomic_set_unchecked(&ctx->tid, 0);
43492 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
43493 ctx->mcg_wq = create_singlethread_workqueue(name);
43494 if (!ctx->mcg_wq)
43495diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
43496index f829fd9..1a8d436 100644
43497--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
43498+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
43499@@ -439,7 +439,7 @@ struct mlx4_ib_demux_ctx {
43500 struct list_head mcg_mgid0_list;
43501 struct workqueue_struct *mcg_wq;
43502 struct mlx4_ib_demux_pv_ctx **tun;
43503- atomic_t tid;
43504+ atomic_unchecked_t tid;
43505 int flushing; /* flushing the work queue */
43506 };
43507
43508diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
43509index 9d3e5c1..6f166df 100644
43510--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
43511+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
43512@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
43513 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
43514 }
43515
43516-int mthca_QUERY_FW(struct mthca_dev *dev)
43517+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
43518 {
43519 struct mthca_mailbox *mailbox;
43520 u32 *outbox;
43521@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43522 CMD_TIME_CLASS_B);
43523 }
43524
43525-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43526+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43527 int num_mtt)
43528 {
43529 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
43530@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
43531 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
43532 }
43533
43534-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43535+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43536 int eq_num)
43537 {
43538 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
43539@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
43540 CMD_TIME_CLASS_B);
43541 }
43542
43543-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
43544+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
43545 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
43546 void *in_mad, void *response_mad)
43547 {
43548diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
43549index ded76c1..0cf0a08 100644
43550--- a/drivers/infiniband/hw/mthca/mthca_main.c
43551+++ b/drivers/infiniband/hw/mthca/mthca_main.c
43552@@ -692,7 +692,7 @@ err_close:
43553 return err;
43554 }
43555
43556-static int mthca_setup_hca(struct mthca_dev *dev)
43557+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
43558 {
43559 int err;
43560
43561diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
43562index ed9a989..6aa5dc2 100644
43563--- a/drivers/infiniband/hw/mthca/mthca_mr.c
43564+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
43565@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
43566 * through the bitmaps)
43567 */
43568
43569-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
43570+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
43571 {
43572 int o;
43573 int m;
43574@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
43575 return key;
43576 }
43577
43578-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
43579+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
43580 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
43581 {
43582 struct mthca_mailbox *mailbox;
43583@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
43584 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
43585 }
43586
43587-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
43588+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
43589 u64 *buffer_list, int buffer_size_shift,
43590 int list_len, u64 iova, u64 total_size,
43591 u32 access, struct mthca_mr *mr)
43592diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
43593index 415f8e1..e34214e 100644
43594--- a/drivers/infiniband/hw/mthca/mthca_provider.c
43595+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
43596@@ -764,7 +764,7 @@ unlock:
43597 return 0;
43598 }
43599
43600-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
43601+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
43602 {
43603 struct mthca_dev *dev = to_mdev(ibcq->device);
43604 struct mthca_cq *cq = to_mcq(ibcq);
43605diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
43606index 3b2a6dc..bce26ff 100644
43607--- a/drivers/infiniband/hw/nes/nes.c
43608+++ b/drivers/infiniband/hw/nes/nes.c
43609@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
43610 LIST_HEAD(nes_adapter_list);
43611 static LIST_HEAD(nes_dev_list);
43612
43613-atomic_t qps_destroyed;
43614+atomic_unchecked_t qps_destroyed;
43615
43616 static unsigned int ee_flsh_adapter;
43617 static unsigned int sysfs_nonidx_addr;
43618@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
43619 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
43620 struct nes_adapter *nesadapter = nesdev->nesadapter;
43621
43622- atomic_inc(&qps_destroyed);
43623+ atomic_inc_unchecked(&qps_destroyed);
43624
43625 /* Free the control structures */
43626
43627diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
43628index bd9d132..70d84f4 100644
43629--- a/drivers/infiniband/hw/nes/nes.h
43630+++ b/drivers/infiniband/hw/nes/nes.h
43631@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
43632 extern unsigned int wqm_quanta;
43633 extern struct list_head nes_adapter_list;
43634
43635-extern atomic_t cm_connects;
43636-extern atomic_t cm_accepts;
43637-extern atomic_t cm_disconnects;
43638-extern atomic_t cm_closes;
43639-extern atomic_t cm_connecteds;
43640-extern atomic_t cm_connect_reqs;
43641-extern atomic_t cm_rejects;
43642-extern atomic_t mod_qp_timouts;
43643-extern atomic_t qps_created;
43644-extern atomic_t qps_destroyed;
43645-extern atomic_t sw_qps_destroyed;
43646+extern atomic_unchecked_t cm_connects;
43647+extern atomic_unchecked_t cm_accepts;
43648+extern atomic_unchecked_t cm_disconnects;
43649+extern atomic_unchecked_t cm_closes;
43650+extern atomic_unchecked_t cm_connecteds;
43651+extern atomic_unchecked_t cm_connect_reqs;
43652+extern atomic_unchecked_t cm_rejects;
43653+extern atomic_unchecked_t mod_qp_timouts;
43654+extern atomic_unchecked_t qps_created;
43655+extern atomic_unchecked_t qps_destroyed;
43656+extern atomic_unchecked_t sw_qps_destroyed;
43657 extern u32 mh_detected;
43658 extern u32 mh_pauses_sent;
43659 extern u32 cm_packets_sent;
43660@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
43661 extern u32 cm_packets_received;
43662 extern u32 cm_packets_dropped;
43663 extern u32 cm_packets_retrans;
43664-extern atomic_t cm_listens_created;
43665-extern atomic_t cm_listens_destroyed;
43666+extern atomic_unchecked_t cm_listens_created;
43667+extern atomic_unchecked_t cm_listens_destroyed;
43668 extern u32 cm_backlog_drops;
43669-extern atomic_t cm_loopbacks;
43670-extern atomic_t cm_nodes_created;
43671-extern atomic_t cm_nodes_destroyed;
43672-extern atomic_t cm_accel_dropped_pkts;
43673-extern atomic_t cm_resets_recvd;
43674-extern atomic_t pau_qps_created;
43675-extern atomic_t pau_qps_destroyed;
43676+extern atomic_unchecked_t cm_loopbacks;
43677+extern atomic_unchecked_t cm_nodes_created;
43678+extern atomic_unchecked_t cm_nodes_destroyed;
43679+extern atomic_unchecked_t cm_accel_dropped_pkts;
43680+extern atomic_unchecked_t cm_resets_recvd;
43681+extern atomic_unchecked_t pau_qps_created;
43682+extern atomic_unchecked_t pau_qps_destroyed;
43683
43684 extern u32 int_mod_timer_init;
43685 extern u32 int_mod_cq_depth_256;
43686diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
43687index 6f09a72..cf4399d 100644
43688--- a/drivers/infiniband/hw/nes/nes_cm.c
43689+++ b/drivers/infiniband/hw/nes/nes_cm.c
43690@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
43691 u32 cm_packets_retrans;
43692 u32 cm_packets_created;
43693 u32 cm_packets_received;
43694-atomic_t cm_listens_created;
43695-atomic_t cm_listens_destroyed;
43696+atomic_unchecked_t cm_listens_created;
43697+atomic_unchecked_t cm_listens_destroyed;
43698 u32 cm_backlog_drops;
43699-atomic_t cm_loopbacks;
43700-atomic_t cm_nodes_created;
43701-atomic_t cm_nodes_destroyed;
43702-atomic_t cm_accel_dropped_pkts;
43703-atomic_t cm_resets_recvd;
43704+atomic_unchecked_t cm_loopbacks;
43705+atomic_unchecked_t cm_nodes_created;
43706+atomic_unchecked_t cm_nodes_destroyed;
43707+atomic_unchecked_t cm_accel_dropped_pkts;
43708+atomic_unchecked_t cm_resets_recvd;
43709
43710 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
43711 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
43712@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
43713 /* instance of function pointers for client API */
43714 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
43715 static struct nes_cm_ops nes_cm_api = {
43716- mini_cm_accelerated,
43717- mini_cm_listen,
43718- mini_cm_del_listen,
43719- mini_cm_connect,
43720- mini_cm_close,
43721- mini_cm_accept,
43722- mini_cm_reject,
43723- mini_cm_recv_pkt,
43724- mini_cm_dealloc_core,
43725- mini_cm_get,
43726- mini_cm_set
43727+ .accelerated = mini_cm_accelerated,
43728+ .listen = mini_cm_listen,
43729+ .stop_listener = mini_cm_del_listen,
43730+ .connect = mini_cm_connect,
43731+ .close = mini_cm_close,
43732+ .accept = mini_cm_accept,
43733+ .reject = mini_cm_reject,
43734+ .recv_pkt = mini_cm_recv_pkt,
43735+ .destroy_cm_core = mini_cm_dealloc_core,
43736+ .get = mini_cm_get,
43737+ .set = mini_cm_set
43738 };
43739
43740 static struct nes_cm_core *g_cm_core;
43741
43742-atomic_t cm_connects;
43743-atomic_t cm_accepts;
43744-atomic_t cm_disconnects;
43745-atomic_t cm_closes;
43746-atomic_t cm_connecteds;
43747-atomic_t cm_connect_reqs;
43748-atomic_t cm_rejects;
43749+atomic_unchecked_t cm_connects;
43750+atomic_unchecked_t cm_accepts;
43751+atomic_unchecked_t cm_disconnects;
43752+atomic_unchecked_t cm_closes;
43753+atomic_unchecked_t cm_connecteds;
43754+atomic_unchecked_t cm_connect_reqs;
43755+atomic_unchecked_t cm_rejects;
43756
43757 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
43758 {
43759@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
43760 kfree(listener);
43761 listener = NULL;
43762 ret = 0;
43763- atomic_inc(&cm_listens_destroyed);
43764+ atomic_inc_unchecked(&cm_listens_destroyed);
43765 } else {
43766 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
43767 }
43768@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
43769 cm_node->rem_mac);
43770
43771 add_hte_node(cm_core, cm_node);
43772- atomic_inc(&cm_nodes_created);
43773+ atomic_inc_unchecked(&cm_nodes_created);
43774
43775 return cm_node;
43776 }
43777@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
43778 }
43779
43780 atomic_dec(&cm_core->node_cnt);
43781- atomic_inc(&cm_nodes_destroyed);
43782+ atomic_inc_unchecked(&cm_nodes_destroyed);
43783 nesqp = cm_node->nesqp;
43784 if (nesqp) {
43785 nesqp->cm_node = NULL;
43786@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
43787
43788 static void drop_packet(struct sk_buff *skb)
43789 {
43790- atomic_inc(&cm_accel_dropped_pkts);
43791+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43792 dev_kfree_skb_any(skb);
43793 }
43794
43795@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
43796 {
43797
43798 int reset = 0; /* whether to send reset in case of err.. */
43799- atomic_inc(&cm_resets_recvd);
43800+ atomic_inc_unchecked(&cm_resets_recvd);
43801 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
43802 " refcnt=%d\n", cm_node, cm_node->state,
43803 atomic_read(&cm_node->ref_count));
43804@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
43805 rem_ref_cm_node(cm_node->cm_core, cm_node);
43806 return NULL;
43807 }
43808- atomic_inc(&cm_loopbacks);
43809+ atomic_inc_unchecked(&cm_loopbacks);
43810 loopbackremotenode->loopbackpartner = cm_node;
43811 loopbackremotenode->tcp_cntxt.rcv_wscale =
43812 NES_CM_DEFAULT_RCV_WND_SCALE;
43813@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
43814 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
43815 else {
43816 rem_ref_cm_node(cm_core, cm_node);
43817- atomic_inc(&cm_accel_dropped_pkts);
43818+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43819 dev_kfree_skb_any(skb);
43820 }
43821 break;
43822@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43823
43824 if ((cm_id) && (cm_id->event_handler)) {
43825 if (issue_disconn) {
43826- atomic_inc(&cm_disconnects);
43827+ atomic_inc_unchecked(&cm_disconnects);
43828 cm_event.event = IW_CM_EVENT_DISCONNECT;
43829 cm_event.status = disconn_status;
43830 cm_event.local_addr = cm_id->local_addr;
43831@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43832 }
43833
43834 if (issue_close) {
43835- atomic_inc(&cm_closes);
43836+ atomic_inc_unchecked(&cm_closes);
43837 nes_disconnect(nesqp, 1);
43838
43839 cm_id->provider_data = nesqp;
43840@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43841
43842 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
43843 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
43844- atomic_inc(&cm_accepts);
43845+ atomic_inc_unchecked(&cm_accepts);
43846
43847 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
43848 netdev_refcnt_read(nesvnic->netdev));
43849@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
43850 struct nes_cm_core *cm_core;
43851 u8 *start_buff;
43852
43853- atomic_inc(&cm_rejects);
43854+ atomic_inc_unchecked(&cm_rejects);
43855 cm_node = (struct nes_cm_node *)cm_id->provider_data;
43856 loopback = cm_node->loopbackpartner;
43857 cm_core = cm_node->cm_core;
43858@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43859 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
43860 ntohs(laddr->sin_port));
43861
43862- atomic_inc(&cm_connects);
43863+ atomic_inc_unchecked(&cm_connects);
43864 nesqp->active_conn = 1;
43865
43866 /* cache the cm_id in the qp */
43867@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
43868 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
43869 return err;
43870 }
43871- atomic_inc(&cm_listens_created);
43872+ atomic_inc_unchecked(&cm_listens_created);
43873 }
43874
43875 cm_id->add_ref(cm_id);
43876@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
43877
43878 if (nesqp->destroyed)
43879 return;
43880- atomic_inc(&cm_connecteds);
43881+ atomic_inc_unchecked(&cm_connecteds);
43882 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
43883 " local port 0x%04X. jiffies = %lu.\n",
43884 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
43885@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
43886
43887 cm_id->add_ref(cm_id);
43888 ret = cm_id->event_handler(cm_id, &cm_event);
43889- atomic_inc(&cm_closes);
43890+ atomic_inc_unchecked(&cm_closes);
43891 cm_event.event = IW_CM_EVENT_CLOSE;
43892 cm_event.status = 0;
43893 cm_event.provider_data = cm_id->provider_data;
43894@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
43895 return;
43896 cm_id = cm_node->cm_id;
43897
43898- atomic_inc(&cm_connect_reqs);
43899+ atomic_inc_unchecked(&cm_connect_reqs);
43900 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43901 cm_node, cm_id, jiffies);
43902
43903@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
43904 return;
43905 cm_id = cm_node->cm_id;
43906
43907- atomic_inc(&cm_connect_reqs);
43908+ atomic_inc_unchecked(&cm_connect_reqs);
43909 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43910 cm_node, cm_id, jiffies);
43911
43912diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
43913index 4166452..fc952c3 100644
43914--- a/drivers/infiniband/hw/nes/nes_mgt.c
43915+++ b/drivers/infiniband/hw/nes/nes_mgt.c
43916@@ -40,8 +40,8 @@
43917 #include "nes.h"
43918 #include "nes_mgt.h"
43919
43920-atomic_t pau_qps_created;
43921-atomic_t pau_qps_destroyed;
43922+atomic_unchecked_t pau_qps_created;
43923+atomic_unchecked_t pau_qps_destroyed;
43924
43925 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
43926 {
43927@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
43928 {
43929 struct sk_buff *skb;
43930 unsigned long flags;
43931- atomic_inc(&pau_qps_destroyed);
43932+ atomic_inc_unchecked(&pau_qps_destroyed);
43933
43934 /* Free packets that have not yet been forwarded */
43935 /* Lock is acquired by skb_dequeue when removing the skb */
43936@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
43937 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
43938 skb_queue_head_init(&nesqp->pau_list);
43939 spin_lock_init(&nesqp->pau_lock);
43940- atomic_inc(&pau_qps_created);
43941+ atomic_inc_unchecked(&pau_qps_created);
43942 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
43943 }
43944
43945diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
43946index 70acda9..a96de9d 100644
43947--- a/drivers/infiniband/hw/nes/nes_nic.c
43948+++ b/drivers/infiniband/hw/nes/nes_nic.c
43949@@ -1274,39 +1274,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
43950 target_stat_values[++index] = mh_detected;
43951 target_stat_values[++index] = mh_pauses_sent;
43952 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
43953- target_stat_values[++index] = atomic_read(&cm_connects);
43954- target_stat_values[++index] = atomic_read(&cm_accepts);
43955- target_stat_values[++index] = atomic_read(&cm_disconnects);
43956- target_stat_values[++index] = atomic_read(&cm_connecteds);
43957- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
43958- target_stat_values[++index] = atomic_read(&cm_rejects);
43959- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
43960- target_stat_values[++index] = atomic_read(&qps_created);
43961- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
43962- target_stat_values[++index] = atomic_read(&qps_destroyed);
43963- target_stat_values[++index] = atomic_read(&cm_closes);
43964+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
43965+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
43966+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
43967+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
43968+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
43969+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
43970+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
43971+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
43972+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
43973+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
43974+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
43975 target_stat_values[++index] = cm_packets_sent;
43976 target_stat_values[++index] = cm_packets_bounced;
43977 target_stat_values[++index] = cm_packets_created;
43978 target_stat_values[++index] = cm_packets_received;
43979 target_stat_values[++index] = cm_packets_dropped;
43980 target_stat_values[++index] = cm_packets_retrans;
43981- target_stat_values[++index] = atomic_read(&cm_listens_created);
43982- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
43983+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
43984+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
43985 target_stat_values[++index] = cm_backlog_drops;
43986- target_stat_values[++index] = atomic_read(&cm_loopbacks);
43987- target_stat_values[++index] = atomic_read(&cm_nodes_created);
43988- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
43989- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
43990- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
43991+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
43992+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
43993+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
43994+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
43995+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
43996 target_stat_values[++index] = nesadapter->free_4kpbl;
43997 target_stat_values[++index] = nesadapter->free_256pbl;
43998 target_stat_values[++index] = int_mod_timer_init;
43999 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
44000 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
44001 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
44002- target_stat_values[++index] = atomic_read(&pau_qps_created);
44003- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
44004+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
44005+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
44006 }
44007
44008 /**
44009diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
44010index c0d0296..3185f57 100644
44011--- a/drivers/infiniband/hw/nes/nes_verbs.c
44012+++ b/drivers/infiniband/hw/nes/nes_verbs.c
44013@@ -46,9 +46,9 @@
44014
44015 #include <rdma/ib_umem.h>
44016
44017-atomic_t mod_qp_timouts;
44018-atomic_t qps_created;
44019-atomic_t sw_qps_destroyed;
44020+atomic_unchecked_t mod_qp_timouts;
44021+atomic_unchecked_t qps_created;
44022+atomic_unchecked_t sw_qps_destroyed;
44023
44024 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
44025
44026@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
44027 if (init_attr->create_flags)
44028 return ERR_PTR(-EINVAL);
44029
44030- atomic_inc(&qps_created);
44031+ atomic_inc_unchecked(&qps_created);
44032 switch (init_attr->qp_type) {
44033 case IB_QPT_RC:
44034 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
44035@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
44036 struct iw_cm_event cm_event;
44037 int ret = 0;
44038
44039- atomic_inc(&sw_qps_destroyed);
44040+ atomic_inc_unchecked(&sw_qps_destroyed);
44041 nesqp->destroyed = 1;
44042
44043 /* Blow away the connection if it exists. */
44044diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
44045index ffd48bf..83cdb56 100644
44046--- a/drivers/infiniband/hw/qib/qib.h
44047+++ b/drivers/infiniband/hw/qib/qib.h
44048@@ -52,6 +52,7 @@
44049 #include <linux/kref.h>
44050 #include <linux/sched.h>
44051 #include <linux/kthread.h>
44052+#include <linux/slab.h>
44053
44054 #include "qib_common.h"
44055 #include "qib_verbs.h"
44056diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
44057index cdc7df4..a2fdfdb 100644
44058--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
44059+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
44060@@ -156,7 +156,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
44061 nla_total_size(2); /* IFLA_IPOIB_UMCAST */
44062 }
44063
44064-static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
44065+static struct rtnl_link_ops ipoib_link_ops = {
44066 .kind = "ipoib",
44067 .maxtype = IFLA_IPOIB_MAX,
44068 .policy = ipoib_policy,
44069diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
44070index e853a21..56fc5a8 100644
44071--- a/drivers/input/gameport/gameport.c
44072+++ b/drivers/input/gameport/gameport.c
44073@@ -527,14 +527,14 @@ EXPORT_SYMBOL(gameport_set_phys);
44074 */
44075 static void gameport_init_port(struct gameport *gameport)
44076 {
44077- static atomic_t gameport_no = ATOMIC_INIT(-1);
44078+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(-1);
44079
44080 __module_get(THIS_MODULE);
44081
44082 mutex_init(&gameport->drv_mutex);
44083 device_initialize(&gameport->dev);
44084 dev_set_name(&gameport->dev, "gameport%lu",
44085- (unsigned long)atomic_inc_return(&gameport_no));
44086+ (unsigned long)atomic_inc_return_unchecked(&gameport_no));
44087 gameport->dev.bus = &gameport_bus;
44088 gameport->dev.release = gameport_release_port;
44089 if (gameport->parent)
44090diff --git a/drivers/input/input.c b/drivers/input/input.c
44091index cc357f1..ee42fbc 100644
44092--- a/drivers/input/input.c
44093+++ b/drivers/input/input.c
44094@@ -1781,7 +1781,7 @@ EXPORT_SYMBOL_GPL(input_class);
44095 */
44096 struct input_dev *input_allocate_device(void)
44097 {
44098- static atomic_t input_no = ATOMIC_INIT(-1);
44099+ static atomic_unchecked_t input_no = ATOMIC_INIT(-1);
44100 struct input_dev *dev;
44101
44102 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
44103@@ -1796,7 +1796,7 @@ struct input_dev *input_allocate_device(void)
44104 INIT_LIST_HEAD(&dev->node);
44105
44106 dev_set_name(&dev->dev, "input%lu",
44107- (unsigned long)atomic_inc_return(&input_no));
44108+ (unsigned long)atomic_inc_return_unchecked(&input_no));
44109
44110 __module_get(THIS_MODULE);
44111 }
44112diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
44113index 4a95b22..874c182 100644
44114--- a/drivers/input/joystick/sidewinder.c
44115+++ b/drivers/input/joystick/sidewinder.c
44116@@ -30,6 +30,7 @@
44117 #include <linux/kernel.h>
44118 #include <linux/module.h>
44119 #include <linux/slab.h>
44120+#include <linux/sched.h>
44121 #include <linux/input.h>
44122 #include <linux/gameport.h>
44123 #include <linux/jiffies.h>
44124diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
44125index 3aa2f3f..53c00ea 100644
44126--- a/drivers/input/joystick/xpad.c
44127+++ b/drivers/input/joystick/xpad.c
44128@@ -886,7 +886,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
44129
44130 static int xpad_led_probe(struct usb_xpad *xpad)
44131 {
44132- static atomic_t led_seq = ATOMIC_INIT(-1);
44133+ static atomic_unchecked_t led_seq = ATOMIC_INIT(-1);
44134 unsigned long led_no;
44135 struct xpad_led *led;
44136 struct led_classdev *led_cdev;
44137@@ -899,7 +899,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
44138 if (!led)
44139 return -ENOMEM;
44140
44141- led_no = atomic_inc_return(&led_seq);
44142+ led_no = atomic_inc_return_unchecked(&led_seq);
44143
44144 snprintf(led->name, sizeof(led->name), "xpad%lu", led_no);
44145 led->xpad = xpad;
44146diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
44147index ac1fa5f..5f7502c 100644
44148--- a/drivers/input/misc/ims-pcu.c
44149+++ b/drivers/input/misc/ims-pcu.c
44150@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
44151
44152 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
44153 {
44154- static atomic_t device_no = ATOMIC_INIT(-1);
44155+ static atomic_unchecked_t device_no = ATOMIC_INIT(-1);
44156
44157 const struct ims_pcu_device_info *info;
44158 int error;
44159@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
44160 }
44161
44162 /* Device appears to be operable, complete initialization */
44163- pcu->device_no = atomic_inc_return(&device_no);
44164+ pcu->device_no = atomic_inc_return_unchecked(&device_no);
44165
44166 /*
44167 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
44168diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
44169index d02e1bd..d719719 100644
44170--- a/drivers/input/mouse/psmouse.h
44171+++ b/drivers/input/mouse/psmouse.h
44172@@ -124,7 +124,7 @@ struct psmouse_attribute {
44173 ssize_t (*set)(struct psmouse *psmouse, void *data,
44174 const char *buf, size_t count);
44175 bool protect;
44176-};
44177+} __do_const;
44178 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
44179
44180 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
44181diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
44182index b604564..3f14ae4 100644
44183--- a/drivers/input/mousedev.c
44184+++ b/drivers/input/mousedev.c
44185@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
44186
44187 spin_unlock_irq(&client->packet_lock);
44188
44189- if (copy_to_user(buffer, data, count))
44190+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
44191 return -EFAULT;
44192
44193 return count;
44194diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
44195index a05a517..323a2fd 100644
44196--- a/drivers/input/serio/serio.c
44197+++ b/drivers/input/serio/serio.c
44198@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
44199 */
44200 static void serio_init_port(struct serio *serio)
44201 {
44202- static atomic_t serio_no = ATOMIC_INIT(-1);
44203+ static atomic_unchecked_t serio_no = ATOMIC_INIT(-1);
44204
44205 __module_get(THIS_MODULE);
44206
44207@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
44208 mutex_init(&serio->drv_mutex);
44209 device_initialize(&serio->dev);
44210 dev_set_name(&serio->dev, "serio%lu",
44211- (unsigned long)atomic_inc_return(&serio_no));
44212+ (unsigned long)atomic_inc_return_unchecked(&serio_no));
44213 serio->dev.bus = &serio_bus;
44214 serio->dev.release = serio_release_port;
44215 serio->dev.groups = serio_device_attr_groups;
44216diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
44217index 71ef5d6..93380a9 100644
44218--- a/drivers/input/serio/serio_raw.c
44219+++ b/drivers/input/serio/serio_raw.c
44220@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
44221
44222 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
44223 {
44224- static atomic_t serio_raw_no = ATOMIC_INIT(-1);
44225+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(-1);
44226 struct serio_raw *serio_raw;
44227 int err;
44228
44229@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
44230 }
44231
44232 snprintf(serio_raw->name, sizeof(serio_raw->name),
44233- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no));
44234+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no));
44235 kref_init(&serio_raw->kref);
44236 INIT_LIST_HEAD(&serio_raw->client_list);
44237 init_waitqueue_head(&serio_raw->wait);
44238diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c
44239index 92e2243..8fd9092 100644
44240--- a/drivers/input/touchscreen/htcpen.c
44241+++ b/drivers/input/touchscreen/htcpen.c
44242@@ -219,7 +219,7 @@ static struct isa_driver htcpen_isa_driver = {
44243 }
44244 };
44245
44246-static struct dmi_system_id htcshift_dmi_table[] __initdata = {
44247+static const struct dmi_system_id htcshift_dmi_table[] __initconst = {
44248 {
44249 .ident = "Shift",
44250 .matches = {
44251diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
44252index 13cfbf4..b5184d9 100644
44253--- a/drivers/iommu/amd_iommu.c
44254+++ b/drivers/iommu/amd_iommu.c
44255@@ -823,11 +823,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
44256
44257 static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
44258 {
44259+ phys_addr_t physaddr;
44260 WARN_ON(address & 0x7ULL);
44261
44262 memset(cmd, 0, sizeof(*cmd));
44263- cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
44264- cmd->data[1] = upper_32_bits(__pa(address));
44265+
44266+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
44267+ if (object_starts_on_stack((void *)address)) {
44268+ void *adjbuf = (void *)address - current->stack + current->lowmem_stack;
44269+ physaddr = __pa((u64)adjbuf);
44270+ } else
44271+#endif
44272+ physaddr = __pa(address);
44273+
44274+ cmd->data[0] = lower_32_bits(physaddr) | CMD_COMPL_WAIT_STORE_MASK;
44275+ cmd->data[1] = upper_32_bits(physaddr);
44276 cmd->data[2] = 1;
44277 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
44278 }
44279diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
44280index 2d1b203..b9f8e18 100644
44281--- a/drivers/iommu/arm-smmu.c
44282+++ b/drivers/iommu/arm-smmu.c
44283@@ -331,7 +331,7 @@ enum arm_smmu_domain_stage {
44284
44285 struct arm_smmu_domain {
44286 struct arm_smmu_device *smmu;
44287- struct io_pgtable_ops *pgtbl_ops;
44288+ struct io_pgtable *pgtbl;
44289 spinlock_t pgtbl_lock;
44290 struct arm_smmu_cfg cfg;
44291 enum arm_smmu_domain_stage stage;
44292@@ -807,7 +807,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44293 {
44294 int irq, start, ret = 0;
44295 unsigned long ias, oas;
44296- struct io_pgtable_ops *pgtbl_ops;
44297+ struct io_pgtable *pgtbl;
44298 struct io_pgtable_cfg pgtbl_cfg;
44299 enum io_pgtable_fmt fmt;
44300 struct arm_smmu_domain *smmu_domain = domain->priv;
44301@@ -892,14 +892,16 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44302 };
44303
44304 smmu_domain->smmu = smmu;
44305- pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
44306- if (!pgtbl_ops) {
44307+ pgtbl = alloc_io_pgtable(fmt, &pgtbl_cfg, smmu_domain);
44308+ if (!pgtbl) {
44309 ret = -ENOMEM;
44310 goto out_clear_smmu;
44311 }
44312
44313 /* Update our support page sizes to reflect the page table format */
44314- arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
44315+ pax_open_kernel();
44316+ *(unsigned long *)&arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
44317+ pax_close_kernel();
44318
44319 /* Initialise the context bank with our page table cfg */
44320 arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
44321@@ -920,7 +922,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44322 mutex_unlock(&smmu_domain->init_mutex);
44323
44324 /* Publish page table ops for map/unmap */
44325- smmu_domain->pgtbl_ops = pgtbl_ops;
44326+ smmu_domain->pgtbl = pgtbl;
44327 return 0;
44328
44329 out_clear_smmu:
44330@@ -953,8 +955,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
44331 free_irq(irq, domain);
44332 }
44333
44334- if (smmu_domain->pgtbl_ops)
44335- free_io_pgtable_ops(smmu_domain->pgtbl_ops);
44336+ free_io_pgtable(smmu_domain->pgtbl);
44337
44338 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
44339 }
44340@@ -1178,13 +1179,13 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
44341 int ret;
44342 unsigned long flags;
44343 struct arm_smmu_domain *smmu_domain = domain->priv;
44344- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44345+ struct io_pgtable *iop = smmu_domain->pgtbl;
44346
44347- if (!ops)
44348+ if (!iop)
44349 return -ENODEV;
44350
44351 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
44352- ret = ops->map(ops, iova, paddr, size, prot);
44353+ ret = iop->ops->map(iop, iova, paddr, size, prot);
44354 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
44355 return ret;
44356 }
44357@@ -1195,13 +1196,13 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
44358 size_t ret;
44359 unsigned long flags;
44360 struct arm_smmu_domain *smmu_domain = domain->priv;
44361- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44362+ struct io_pgtable *iop = smmu_domain->pgtbl;
44363
44364- if (!ops)
44365+ if (!iop)
44366 return 0;
44367
44368 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
44369- ret = ops->unmap(ops, iova, size);
44370+ ret = iop->ops->unmap(iop, iova, size);
44371 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
44372 return ret;
44373 }
44374@@ -1212,7 +1213,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
44375 struct arm_smmu_domain *smmu_domain = domain->priv;
44376 struct arm_smmu_device *smmu = smmu_domain->smmu;
44377 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
44378- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44379+ struct io_pgtable *iop = smmu_domain->pgtbl;
44380 struct device *dev = smmu->dev;
44381 void __iomem *cb_base;
44382 u32 tmp;
44383@@ -1235,7 +1236,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
44384 dev_err(dev,
44385 "iova to phys timed out on 0x%pad. Falling back to software table walk.\n",
44386 &iova);
44387- return ops->iova_to_phys(ops, iova);
44388+ return iop->ops->iova_to_phys(iop, iova);
44389 }
44390
44391 phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
44392@@ -1256,9 +1257,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
44393 phys_addr_t ret;
44394 unsigned long flags;
44395 struct arm_smmu_domain *smmu_domain = domain->priv;
44396- struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
44397+ struct io_pgtable *iop = smmu_domain->pgtbl;
44398
44399- if (!ops)
44400+ if (!iop)
44401 return 0;
44402
44403 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
44404@@ -1266,7 +1267,7 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
44405 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
44406 ret = arm_smmu_iova_to_phys_hard(domain, iova);
44407 } else {
44408- ret = ops->iova_to_phys(ops, iova);
44409+ ret = iop->ops->iova_to_phys(iop, iova);
44410 }
44411
44412 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
44413@@ -1625,7 +1626,9 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
44414 size |= SZ_64K | SZ_512M;
44415 }
44416
44417- arm_smmu_ops.pgsize_bitmap &= size;
44418+ pax_open_kernel();
44419+ *(unsigned long *)&arm_smmu_ops.pgsize_bitmap &= size;
44420+ pax_close_kernel();
44421 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
44422
44423 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
44424diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
44425index b610a8d..08eb879 100644
44426--- a/drivers/iommu/io-pgtable-arm.c
44427+++ b/drivers/iommu/io-pgtable-arm.c
44428@@ -36,12 +36,6 @@
44429 #define io_pgtable_to_data(x) \
44430 container_of((x), struct arm_lpae_io_pgtable, iop)
44431
44432-#define io_pgtable_ops_to_pgtable(x) \
44433- container_of((x), struct io_pgtable, ops)
44434-
44435-#define io_pgtable_ops_to_data(x) \
44436- io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
44437-
44438 /*
44439 * For consistency with the architecture, we always consider
44440 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
44441@@ -302,10 +296,10 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
44442 return pte;
44443 }
44444
44445-static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
44446+static int arm_lpae_map(struct io_pgtable *iop, unsigned long iova,
44447 phys_addr_t paddr, size_t size, int iommu_prot)
44448 {
44449- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44450+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44451 arm_lpae_iopte *ptep = data->pgd;
44452 int lvl = ARM_LPAE_START_LVL(data);
44453 arm_lpae_iopte prot;
44454@@ -445,12 +439,11 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
44455 return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
44456 }
44457
44458-static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
44459+static int arm_lpae_unmap(struct io_pgtable *iop, unsigned long iova,
44460 size_t size)
44461 {
44462 size_t unmapped;
44463- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44464- struct io_pgtable *iop = &data->iop;
44465+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44466 arm_lpae_iopte *ptep = data->pgd;
44467 int lvl = ARM_LPAE_START_LVL(data);
44468
44469@@ -461,10 +454,10 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
44470 return unmapped;
44471 }
44472
44473-static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
44474+static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable *iop,
44475 unsigned long iova)
44476 {
44477- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44478+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44479 arm_lpae_iopte pte, *ptep = data->pgd;
44480 int lvl = ARM_LPAE_START_LVL(data);
44481
44482@@ -531,6 +524,12 @@ static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
44483 }
44484 }
44485
44486+static struct io_pgtable_ops arm_lpae_io_pgtable_ops = {
44487+ .map = arm_lpae_map,
44488+ .unmap = arm_lpae_unmap,
44489+ .iova_to_phys = arm_lpae_iova_to_phys,
44490+};
44491+
44492 static struct arm_lpae_io_pgtable *
44493 arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
44494 {
44495@@ -562,11 +561,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
44496 pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
44497 data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
44498
44499- data->iop.ops = (struct io_pgtable_ops) {
44500- .map = arm_lpae_map,
44501- .unmap = arm_lpae_unmap,
44502- .iova_to_phys = arm_lpae_iova_to_phys,
44503- };
44504+ data->iop.ops = &arm_lpae_io_pgtable_ops;
44505
44506 return data;
44507 }
44508@@ -825,9 +820,9 @@ static struct iommu_gather_ops dummy_tlb_ops __initdata = {
44509 .flush_pgtable = dummy_flush_pgtable,
44510 };
44511
44512-static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
44513+static void __init arm_lpae_dump_ops(struct io_pgtable *iop)
44514 {
44515- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
44516+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
44517 struct io_pgtable_cfg *cfg = &data->iop.cfg;
44518
44519 pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
44520@@ -837,9 +832,9 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
44521 data->bits_per_level, data->pgd);
44522 }
44523
44524-#define __FAIL(ops, i) ({ \
44525+#define __FAIL(iop, i) ({ \
44526 WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
44527- arm_lpae_dump_ops(ops); \
44528+ arm_lpae_dump_ops(iop); \
44529 selftest_running = false; \
44530 -EFAULT; \
44531 })
44532@@ -854,30 +849,32 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44533 int i, j;
44534 unsigned long iova;
44535 size_t size;
44536- struct io_pgtable_ops *ops;
44537+ struct io_pgtable *iop;
44538+ const struct io_pgtable_ops *ops;
44539
44540 selftest_running = true;
44541
44542 for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
44543 cfg_cookie = cfg;
44544- ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
44545- if (!ops) {
44546+ iop = alloc_io_pgtable(fmts[i], cfg, cfg);
44547+ if (!iop) {
44548 pr_err("selftest: failed to allocate io pgtable ops\n");
44549 return -ENOMEM;
44550 }
44551+ ops = iop->ops;
44552
44553 /*
44554 * Initial sanity checks.
44555 * Empty page tables shouldn't provide any translations.
44556 */
44557- if (ops->iova_to_phys(ops, 42))
44558- return __FAIL(ops, i);
44559+ if (ops->iova_to_phys(iop, 42))
44560+ return __FAIL(iop, i);
44561
44562- if (ops->iova_to_phys(ops, SZ_1G + 42))
44563- return __FAIL(ops, i);
44564+ if (ops->iova_to_phys(iop, SZ_1G + 42))
44565+ return __FAIL(iop, i);
44566
44567- if (ops->iova_to_phys(ops, SZ_2G + 42))
44568- return __FAIL(ops, i);
44569+ if (ops->iova_to_phys(iop, SZ_2G + 42))
44570+ return __FAIL(iop, i);
44571
44572 /*
44573 * Distinct mappings of different granule sizes.
44574@@ -887,19 +884,19 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44575 while (j != BITS_PER_LONG) {
44576 size = 1UL << j;
44577
44578- if (ops->map(ops, iova, iova, size, IOMMU_READ |
44579+ if (ops->map(iop, iova, iova, size, IOMMU_READ |
44580 IOMMU_WRITE |
44581 IOMMU_NOEXEC |
44582 IOMMU_CACHE))
44583- return __FAIL(ops, i);
44584+ return __FAIL(iop, i);
44585
44586 /* Overlapping mappings */
44587- if (!ops->map(ops, iova, iova + size, size,
44588+ if (!ops->map(iop, iova, iova + size, size,
44589 IOMMU_READ | IOMMU_NOEXEC))
44590- return __FAIL(ops, i);
44591+ return __FAIL(iop, i);
44592
44593- if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
44594- return __FAIL(ops, i);
44595+ if (ops->iova_to_phys(iop, iova + 42) != (iova + 42))
44596+ return __FAIL(iop, i);
44597
44598 iova += SZ_1G;
44599 j++;
44600@@ -908,15 +905,15 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44601
44602 /* Partial unmap */
44603 size = 1UL << __ffs(cfg->pgsize_bitmap);
44604- if (ops->unmap(ops, SZ_1G + size, size) != size)
44605- return __FAIL(ops, i);
44606+ if (ops->unmap(iop, SZ_1G + size, size) != size)
44607+ return __FAIL(iop, i);
44608
44609 /* Remap of partial unmap */
44610- if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
44611- return __FAIL(ops, i);
44612+ if (ops->map(iop, SZ_1G + size, size, size, IOMMU_READ))
44613+ return __FAIL(iop, i);
44614
44615- if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
44616- return __FAIL(ops, i);
44617+ if (ops->iova_to_phys(iop, SZ_1G + size + 42) != (size + 42))
44618+ return __FAIL(iop, i);
44619
44620 /* Full unmap */
44621 iova = 0;
44622@@ -924,25 +921,25 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
44623 while (j != BITS_PER_LONG) {
44624 size = 1UL << j;
44625
44626- if (ops->unmap(ops, iova, size) != size)
44627- return __FAIL(ops, i);
44628+ if (ops->unmap(iop, iova, size) != size)
44629+ return __FAIL(iop, i);
44630
44631- if (ops->iova_to_phys(ops, iova + 42))
44632- return __FAIL(ops, i);
44633+ if (ops->iova_to_phys(iop, iova + 42))
44634+ return __FAIL(iop, i);
44635
44636 /* Remap full block */
44637- if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
44638- return __FAIL(ops, i);
44639+ if (ops->map(iop, iova, iova, size, IOMMU_WRITE))
44640+ return __FAIL(iop, i);
44641
44642- if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
44643- return __FAIL(ops, i);
44644+ if (ops->iova_to_phys(iop, iova + 42) != (iova + 42))
44645+ return __FAIL(iop, i);
44646
44647 iova += SZ_1G;
44648 j++;
44649 j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
44650 }
44651
44652- free_io_pgtable_ops(ops);
44653+ free_io_pgtable(iop);
44654 }
44655
44656 selftest_running = false;
44657diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
44658index 6436fe2..088c965 100644
44659--- a/drivers/iommu/io-pgtable.c
44660+++ b/drivers/iommu/io-pgtable.c
44661@@ -40,7 +40,7 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
44662 #endif
44663 };
44664
44665-struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44666+struct io_pgtable *alloc_io_pgtable(enum io_pgtable_fmt fmt,
44667 struct io_pgtable_cfg *cfg,
44668 void *cookie)
44669 {
44670@@ -62,21 +62,18 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44671 iop->cookie = cookie;
44672 iop->cfg = *cfg;
44673
44674- return &iop->ops;
44675+ return iop;
44676 }
44677
44678 /*
44679 * It is the IOMMU driver's responsibility to ensure that the page table
44680 * is no longer accessible to the walker by this point.
44681 */
44682-void free_io_pgtable_ops(struct io_pgtable_ops *ops)
44683+void free_io_pgtable(struct io_pgtable *iop)
44684 {
44685- struct io_pgtable *iop;
44686-
44687- if (!ops)
44688+ if (!iop)
44689 return;
44690
44691- iop = container_of(ops, struct io_pgtable, ops);
44692 iop->cfg.tlb->tlb_flush_all(iop->cookie);
44693 io_pgtable_init_table[iop->fmt]->free(iop);
44694 }
44695diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
44696index 10e32f6..0b276c8 100644
44697--- a/drivers/iommu/io-pgtable.h
44698+++ b/drivers/iommu/io-pgtable.h
44699@@ -75,17 +75,18 @@ struct io_pgtable_cfg {
44700 * These functions map directly onto the iommu_ops member functions with
44701 * the same names.
44702 */
44703+struct io_pgtable;
44704 struct io_pgtable_ops {
44705- int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
44706+ int (*map)(struct io_pgtable *iop, unsigned long iova,
44707 phys_addr_t paddr, size_t size, int prot);
44708- int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
44709+ int (*unmap)(struct io_pgtable *iop, unsigned long iova,
44710 size_t size);
44711- phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
44712+ phys_addr_t (*iova_to_phys)(struct io_pgtable *iop,
44713 unsigned long iova);
44714 };
44715
44716 /**
44717- * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
44718+ * alloc_io_pgtable() - Allocate a page table allocator for use by an IOMMU.
44719 *
44720 * @fmt: The page table format.
44721 * @cfg: The page table configuration. This will be modified to represent
44722@@ -94,9 +95,9 @@ struct io_pgtable_ops {
44723 * @cookie: An opaque token provided by the IOMMU driver and passed back to
44724 * the callback routines in cfg->tlb.
44725 */
44726-struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44727- struct io_pgtable_cfg *cfg,
44728- void *cookie);
44729+struct io_pgtable *alloc_io_pgtable(enum io_pgtable_fmt fmt,
44730+ struct io_pgtable_cfg *cfg,
44731+ void *cookie);
44732
44733 /**
44734 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
44735@@ -105,7 +106,7 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
44736 *
44737 * @ops: The ops returned from alloc_io_pgtable_ops.
44738 */
44739-void free_io_pgtable_ops(struct io_pgtable_ops *ops);
44740+void free_io_pgtable(struct io_pgtable *iop);
44741
44742
44743 /*
44744@@ -125,7 +126,7 @@ struct io_pgtable {
44745 enum io_pgtable_fmt fmt;
44746 void *cookie;
44747 struct io_pgtable_cfg cfg;
44748- struct io_pgtable_ops ops;
44749+ const struct io_pgtable_ops *ops;
44750 };
44751
44752 /**
44753diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
44754index 72e683d..c9db262 100644
44755--- a/drivers/iommu/iommu.c
44756+++ b/drivers/iommu/iommu.c
44757@@ -802,7 +802,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
44758 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
44759 {
44760 int err;
44761- struct notifier_block *nb;
44762+ notifier_block_no_const *nb;
44763 struct iommu_callback_data cb = {
44764 .ops = ops,
44765 };
44766diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
44767index bc39bdf..e2de272 100644
44768--- a/drivers/iommu/ipmmu-vmsa.c
44769+++ b/drivers/iommu/ipmmu-vmsa.c
44770@@ -41,7 +41,7 @@ struct ipmmu_vmsa_domain {
44771 struct iommu_domain *io_domain;
44772
44773 struct io_pgtable_cfg cfg;
44774- struct io_pgtable_ops *iop;
44775+ struct io_pgtable *iop;
44776
44777 unsigned int context_id;
44778 spinlock_t lock; /* Protects mappings */
44779@@ -323,8 +323,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
44780 domain->cfg.oas = 40;
44781 domain->cfg.tlb = &ipmmu_gather_ops;
44782
44783- domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
44784- domain);
44785+ domain->iop = alloc_io_pgtable(ARM_32_LPAE_S1, &domain->cfg, domain);
44786 if (!domain->iop)
44787 return -EINVAL;
44788
44789@@ -482,7 +481,7 @@ static void ipmmu_domain_destroy(struct iommu_domain *io_domain)
44790 * been detached.
44791 */
44792 ipmmu_domain_destroy_context(domain);
44793- free_io_pgtable_ops(domain->iop);
44794+ free_io_pgtable(domain->iop);
44795 kfree(domain);
44796 }
44797
44798@@ -551,7 +550,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
44799 if (!domain)
44800 return -ENODEV;
44801
44802- return domain->iop->map(domain->iop, iova, paddr, size, prot);
44803+ return domain->iop->ops->map(domain->iop, iova, paddr, size, prot);
44804 }
44805
44806 static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
44807@@ -559,7 +558,7 @@ static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
44808 {
44809 struct ipmmu_vmsa_domain *domain = io_domain->priv;
44810
44811- return domain->iop->unmap(domain->iop, iova, size);
44812+ return domain->iop->ops->unmap(domain->iop, iova, size);
44813 }
44814
44815 static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
44816@@ -569,7 +568,7 @@ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
44817
44818 /* TODO: Is locking needed ? */
44819
44820- return domain->iop->iova_to_phys(domain->iop, iova);
44821+ return domain->iop->ops->iova_to_phys(domain->iop, iova);
44822 }
44823
44824 static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev,
44825diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
44826index 390079e..1da9d6c 100644
44827--- a/drivers/iommu/irq_remapping.c
44828+++ b/drivers/iommu/irq_remapping.c
44829@@ -329,7 +329,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
44830 void panic_if_irq_remap(const char *msg)
44831 {
44832 if (irq_remapping_enabled)
44833- panic(msg);
44834+ panic("%s", msg);
44835 }
44836
44837 static void ir_ack_apic_edge(struct irq_data *data)
44838@@ -350,10 +350,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
44839
44840 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
44841 {
44842- chip->irq_print_chip = ir_print_prefix;
44843- chip->irq_ack = ir_ack_apic_edge;
44844- chip->irq_eoi = ir_ack_apic_level;
44845- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
44846+ pax_open_kernel();
44847+ *(void **)&chip->irq_print_chip = ir_print_prefix;
44848+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
44849+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
44850+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
44851+ pax_close_kernel();
44852 }
44853
44854 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
44855diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
44856index 471e1cd..b53b870 100644
44857--- a/drivers/irqchip/irq-gic.c
44858+++ b/drivers/irqchip/irq-gic.c
44859@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
44860 * Supported arch specific GIC irq extension.
44861 * Default make them NULL.
44862 */
44863-struct irq_chip gic_arch_extn = {
44864+irq_chip_no_const gic_arch_extn = {
44865 .irq_eoi = NULL,
44866 .irq_mask = NULL,
44867 .irq_unmask = NULL,
44868@@ -318,7 +318,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
44869 chained_irq_exit(chip, desc);
44870 }
44871
44872-static struct irq_chip gic_chip = {
44873+static irq_chip_no_const gic_chip __read_only = {
44874 .name = "GIC",
44875 .irq_mask = gic_mask_irq,
44876 .irq_unmask = gic_unmask_irq,
44877diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
44878index 9a0767b..5e5f86f 100644
44879--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
44880+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
44881@@ -373,7 +373,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
44882 struct intc_irqpin_iomem *i;
44883 struct resource *io[INTC_IRQPIN_REG_NR];
44884 struct resource *irq;
44885- struct irq_chip *irq_chip;
44886+ irq_chip_no_const *irq_chip;
44887 void (*enable_fn)(struct irq_data *d);
44888 void (*disable_fn)(struct irq_data *d);
44889 const char *name = dev_name(dev);
44890diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
44891index 384e6ed..7a771b2 100644
44892--- a/drivers/irqchip/irq-renesas-irqc.c
44893+++ b/drivers/irqchip/irq-renesas-irqc.c
44894@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
44895 struct irqc_priv *p;
44896 struct resource *io;
44897 struct resource *irq;
44898- struct irq_chip *irq_chip;
44899+ irq_chip_no_const *irq_chip;
44900 const char *name = dev_name(&pdev->dev);
44901 int ret;
44902 int k;
44903diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
44904index 6a2df32..dc962f1 100644
44905--- a/drivers/isdn/capi/capi.c
44906+++ b/drivers/isdn/capi/capi.c
44907@@ -81,8 +81,8 @@ struct capiminor {
44908
44909 struct capi20_appl *ap;
44910 u32 ncci;
44911- atomic_t datahandle;
44912- atomic_t msgid;
44913+ atomic_unchecked_t datahandle;
44914+ atomic_unchecked_t msgid;
44915
44916 struct tty_port port;
44917 int ttyinstop;
44918@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
44919 capimsg_setu16(s, 2, mp->ap->applid);
44920 capimsg_setu8 (s, 4, CAPI_DATA_B3);
44921 capimsg_setu8 (s, 5, CAPI_RESP);
44922- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
44923+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
44924 capimsg_setu32(s, 8, mp->ncci);
44925 capimsg_setu16(s, 12, datahandle);
44926 }
44927@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
44928 mp->outbytes -= len;
44929 spin_unlock_bh(&mp->outlock);
44930
44931- datahandle = atomic_inc_return(&mp->datahandle);
44932+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
44933 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
44934 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
44935 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
44936 capimsg_setu16(skb->data, 2, mp->ap->applid);
44937 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
44938 capimsg_setu8 (skb->data, 5, CAPI_REQ);
44939- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
44940+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
44941 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
44942 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
44943 capimsg_setu16(skb->data, 16, len); /* Data length */
44944diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
44945index aecec6d..11e13c5 100644
44946--- a/drivers/isdn/gigaset/bas-gigaset.c
44947+++ b/drivers/isdn/gigaset/bas-gigaset.c
44948@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
44949
44950
44951 static const struct gigaset_ops gigops = {
44952- gigaset_write_cmd,
44953- gigaset_write_room,
44954- gigaset_chars_in_buffer,
44955- gigaset_brkchars,
44956- gigaset_init_bchannel,
44957- gigaset_close_bchannel,
44958- gigaset_initbcshw,
44959- gigaset_freebcshw,
44960- gigaset_reinitbcshw,
44961- gigaset_initcshw,
44962- gigaset_freecshw,
44963- gigaset_set_modem_ctrl,
44964- gigaset_baud_rate,
44965- gigaset_set_line_ctrl,
44966- gigaset_isoc_send_skb,
44967- gigaset_isoc_input,
44968+ .write_cmd = gigaset_write_cmd,
44969+ .write_room = gigaset_write_room,
44970+ .chars_in_buffer = gigaset_chars_in_buffer,
44971+ .brkchars = gigaset_brkchars,
44972+ .init_bchannel = gigaset_init_bchannel,
44973+ .close_bchannel = gigaset_close_bchannel,
44974+ .initbcshw = gigaset_initbcshw,
44975+ .freebcshw = gigaset_freebcshw,
44976+ .reinitbcshw = gigaset_reinitbcshw,
44977+ .initcshw = gigaset_initcshw,
44978+ .freecshw = gigaset_freecshw,
44979+ .set_modem_ctrl = gigaset_set_modem_ctrl,
44980+ .baud_rate = gigaset_baud_rate,
44981+ .set_line_ctrl = gigaset_set_line_ctrl,
44982+ .send_skb = gigaset_isoc_send_skb,
44983+ .handle_input = gigaset_isoc_input,
44984 };
44985
44986 /* bas_gigaset_init
44987diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
44988index 600c79b..3752bab 100644
44989--- a/drivers/isdn/gigaset/interface.c
44990+++ b/drivers/isdn/gigaset/interface.c
44991@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
44992 }
44993 tty->driver_data = cs;
44994
44995- ++cs->port.count;
44996+ atomic_inc(&cs->port.count);
44997
44998- if (cs->port.count == 1) {
44999+ if (atomic_read(&cs->port.count) == 1) {
45000 tty_port_tty_set(&cs->port, tty);
45001 cs->port.low_latency = 1;
45002 }
45003@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
45004
45005 if (!cs->connected)
45006 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
45007- else if (!cs->port.count)
45008+ else if (!atomic_read(&cs->port.count))
45009 dev_warn(cs->dev, "%s: device not opened\n", __func__);
45010- else if (!--cs->port.count)
45011+ else if (!atomic_dec_return(&cs->port.count))
45012 tty_port_tty_set(&cs->port, NULL);
45013
45014 mutex_unlock(&cs->mutex);
45015diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
45016index 8c91fd5..14f13ce 100644
45017--- a/drivers/isdn/gigaset/ser-gigaset.c
45018+++ b/drivers/isdn/gigaset/ser-gigaset.c
45019@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
45020 }
45021
45022 static const struct gigaset_ops ops = {
45023- gigaset_write_cmd,
45024- gigaset_write_room,
45025- gigaset_chars_in_buffer,
45026- gigaset_brkchars,
45027- gigaset_init_bchannel,
45028- gigaset_close_bchannel,
45029- gigaset_initbcshw,
45030- gigaset_freebcshw,
45031- gigaset_reinitbcshw,
45032- gigaset_initcshw,
45033- gigaset_freecshw,
45034- gigaset_set_modem_ctrl,
45035- gigaset_baud_rate,
45036- gigaset_set_line_ctrl,
45037- gigaset_m10x_send_skb, /* asyncdata.c */
45038- gigaset_m10x_input, /* asyncdata.c */
45039+ .write_cmd = gigaset_write_cmd,
45040+ .write_room = gigaset_write_room,
45041+ .chars_in_buffer = gigaset_chars_in_buffer,
45042+ .brkchars = gigaset_brkchars,
45043+ .init_bchannel = gigaset_init_bchannel,
45044+ .close_bchannel = gigaset_close_bchannel,
45045+ .initbcshw = gigaset_initbcshw,
45046+ .freebcshw = gigaset_freebcshw,
45047+ .reinitbcshw = gigaset_reinitbcshw,
45048+ .initcshw = gigaset_initcshw,
45049+ .freecshw = gigaset_freecshw,
45050+ .set_modem_ctrl = gigaset_set_modem_ctrl,
45051+ .baud_rate = gigaset_baud_rate,
45052+ .set_line_ctrl = gigaset_set_line_ctrl,
45053+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
45054+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
45055 };
45056
45057
45058diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
45059index 5f306e2..5342f88 100644
45060--- a/drivers/isdn/gigaset/usb-gigaset.c
45061+++ b/drivers/isdn/gigaset/usb-gigaset.c
45062@@ -543,7 +543,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
45063 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
45064 memcpy(cs->hw.usb->bchars, buf, 6);
45065 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
45066- 0, 0, &buf, 6, 2000);
45067+ 0, 0, buf, 6, 2000);
45068 }
45069
45070 static void gigaset_freebcshw(struct bc_state *bcs)
45071@@ -862,22 +862,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
45072 }
45073
45074 static const struct gigaset_ops ops = {
45075- gigaset_write_cmd,
45076- gigaset_write_room,
45077- gigaset_chars_in_buffer,
45078- gigaset_brkchars,
45079- gigaset_init_bchannel,
45080- gigaset_close_bchannel,
45081- gigaset_initbcshw,
45082- gigaset_freebcshw,
45083- gigaset_reinitbcshw,
45084- gigaset_initcshw,
45085- gigaset_freecshw,
45086- gigaset_set_modem_ctrl,
45087- gigaset_baud_rate,
45088- gigaset_set_line_ctrl,
45089- gigaset_m10x_send_skb,
45090- gigaset_m10x_input,
45091+ .write_cmd = gigaset_write_cmd,
45092+ .write_room = gigaset_write_room,
45093+ .chars_in_buffer = gigaset_chars_in_buffer,
45094+ .brkchars = gigaset_brkchars,
45095+ .init_bchannel = gigaset_init_bchannel,
45096+ .close_bchannel = gigaset_close_bchannel,
45097+ .initbcshw = gigaset_initbcshw,
45098+ .freebcshw = gigaset_freebcshw,
45099+ .reinitbcshw = gigaset_reinitbcshw,
45100+ .initcshw = gigaset_initcshw,
45101+ .freecshw = gigaset_freecshw,
45102+ .set_modem_ctrl = gigaset_set_modem_ctrl,
45103+ .baud_rate = gigaset_baud_rate,
45104+ .set_line_ctrl = gigaset_set_line_ctrl,
45105+ .send_skb = gigaset_m10x_send_skb,
45106+ .handle_input = gigaset_m10x_input,
45107 };
45108
45109 /*
45110diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
45111index 4d9b195..455075c 100644
45112--- a/drivers/isdn/hardware/avm/b1.c
45113+++ b/drivers/isdn/hardware/avm/b1.c
45114@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
45115 }
45116 if (left) {
45117 if (t4file->user) {
45118- if (copy_from_user(buf, dp, left))
45119+ if (left > sizeof buf || copy_from_user(buf, dp, left))
45120 return -EFAULT;
45121 } else {
45122 memcpy(buf, dp, left);
45123@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
45124 }
45125 if (left) {
45126 if (config->user) {
45127- if (copy_from_user(buf, dp, left))
45128+ if (left > sizeof buf || copy_from_user(buf, dp, left))
45129 return -EFAULT;
45130 } else {
45131 memcpy(buf, dp, left);
45132diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
45133index 9b856e1..fa03c92 100644
45134--- a/drivers/isdn/i4l/isdn_common.c
45135+++ b/drivers/isdn/i4l/isdn_common.c
45136@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
45137 } else
45138 return -EINVAL;
45139 case IIOCDBGVAR:
45140+ if (!capable(CAP_SYS_RAWIO))
45141+ return -EPERM;
45142 if (arg) {
45143 if (copy_to_user(argp, &dev, sizeof(ulong)))
45144 return -EFAULT;
45145diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
45146index 91d5730..336523e 100644
45147--- a/drivers/isdn/i4l/isdn_concap.c
45148+++ b/drivers/isdn/i4l/isdn_concap.c
45149@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
45150 }
45151
45152 struct concap_device_ops isdn_concap_reliable_dl_dops = {
45153- &isdn_concap_dl_data_req,
45154- &isdn_concap_dl_connect_req,
45155- &isdn_concap_dl_disconn_req
45156+ .data_req = &isdn_concap_dl_data_req,
45157+ .connect_req = &isdn_concap_dl_connect_req,
45158+ .disconn_req = &isdn_concap_dl_disconn_req
45159 };
45160
45161 /* The following should better go into a dedicated source file such that
45162diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
45163index bc91261..2ef7e36 100644
45164--- a/drivers/isdn/i4l/isdn_tty.c
45165+++ b/drivers/isdn/i4l/isdn_tty.c
45166@@ -1503,9 +1503,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
45167
45168 #ifdef ISDN_DEBUG_MODEM_OPEN
45169 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
45170- port->count);
45171+ atomic_read(&port->count));
45172 #endif
45173- port->count++;
45174+ atomic_inc(&port->count);
45175 port->tty = tty;
45176 /*
45177 * Start up serial port
45178@@ -1549,7 +1549,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
45179 #endif
45180 return;
45181 }
45182- if ((tty->count == 1) && (port->count != 1)) {
45183+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
45184 /*
45185 * Uh, oh. tty->count is 1, which means that the tty
45186 * structure will be freed. Info->count should always
45187@@ -1558,15 +1558,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
45188 * serial port won't be shutdown.
45189 */
45190 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
45191- "info->count is %d\n", port->count);
45192- port->count = 1;
45193+ "info->count is %d\n", atomic_read(&port->count));
45194+ atomic_set(&port->count, 1);
45195 }
45196- if (--port->count < 0) {
45197+ if (atomic_dec_return(&port->count) < 0) {
45198 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
45199- info->line, port->count);
45200- port->count = 0;
45201+ info->line, atomic_read(&port->count));
45202+ atomic_set(&port->count, 0);
45203 }
45204- if (port->count) {
45205+ if (atomic_read(&port->count)) {
45206 #ifdef ISDN_DEBUG_MODEM_OPEN
45207 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
45208 #endif
45209@@ -1620,7 +1620,7 @@ isdn_tty_hangup(struct tty_struct *tty)
45210 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
45211 return;
45212 isdn_tty_shutdown(info);
45213- port->count = 0;
45214+ atomic_set(&port->count, 0);
45215 port->flags &= ~ASYNC_NORMAL_ACTIVE;
45216 port->tty = NULL;
45217 wake_up_interruptible(&port->open_wait);
45218@@ -1965,7 +1965,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
45219 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
45220 modem_info *info = &dev->mdm.info[i];
45221
45222- if (info->port.count == 0)
45223+ if (atomic_read(&info->port.count) == 0)
45224 continue;
45225 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
45226 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
45227diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
45228index e2d4e58..40cd045 100644
45229--- a/drivers/isdn/i4l/isdn_x25iface.c
45230+++ b/drivers/isdn/i4l/isdn_x25iface.c
45231@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
45232
45233
45234 static struct concap_proto_ops ix25_pops = {
45235- &isdn_x25iface_proto_new,
45236- &isdn_x25iface_proto_del,
45237- &isdn_x25iface_proto_restart,
45238- &isdn_x25iface_proto_close,
45239- &isdn_x25iface_xmit,
45240- &isdn_x25iface_receive,
45241- &isdn_x25iface_connect_ind,
45242- &isdn_x25iface_disconn_ind
45243+ .proto_new = &isdn_x25iface_proto_new,
45244+ .proto_del = &isdn_x25iface_proto_del,
45245+ .restart = &isdn_x25iface_proto_restart,
45246+ .close = &isdn_x25iface_proto_close,
45247+ .encap_and_xmit = &isdn_x25iface_xmit,
45248+ .data_ind = &isdn_x25iface_receive,
45249+ .connect_ind = &isdn_x25iface_connect_ind,
45250+ .disconn_ind = &isdn_x25iface_disconn_ind
45251 };
45252
45253 /* error message helper function */
45254diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
45255index 358a574..b4987ea 100644
45256--- a/drivers/isdn/icn/icn.c
45257+++ b/drivers/isdn/icn/icn.c
45258@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
45259 if (count > len)
45260 count = len;
45261 if (user) {
45262- if (copy_from_user(msg, buf, count))
45263+ if (count > sizeof msg || copy_from_user(msg, buf, count))
45264 return -EFAULT;
45265 } else
45266 memcpy(msg, buf, count);
45267diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
45268index 87f7dff..7300125 100644
45269--- a/drivers/isdn/mISDN/dsp_cmx.c
45270+++ b/drivers/isdn/mISDN/dsp_cmx.c
45271@@ -1625,7 +1625,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
45272 static u16 dsp_count; /* last sample count */
45273 static int dsp_count_valid; /* if we have last sample count */
45274
45275-void
45276+void __intentional_overflow(-1)
45277 dsp_cmx_send(void *arg)
45278 {
45279 struct dsp_conf *conf;
45280diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
45281index 312ffd3..9263d05 100644
45282--- a/drivers/lguest/core.c
45283+++ b/drivers/lguest/core.c
45284@@ -96,9 +96,17 @@ static __init int map_switcher(void)
45285 * The end address needs +1 because __get_vm_area allocates an
45286 * extra guard page, so we need space for that.
45287 */
45288+
45289+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
45290+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45291+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
45292+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45293+#else
45294 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
45295 VM_ALLOC, switcher_addr, switcher_addr
45296 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
45297+#endif
45298+
45299 if (!switcher_vma) {
45300 err = -ENOMEM;
45301 printk("lguest: could not map switcher pages high\n");
45302@@ -121,7 +129,7 @@ static __init int map_switcher(void)
45303 * Now the Switcher is mapped at the right address, we can't fail!
45304 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
45305 */
45306- memcpy(switcher_vma->addr, start_switcher_text,
45307+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
45308 end_switcher_text - start_switcher_text);
45309
45310 printk(KERN_INFO "lguest: mapped switcher at %p\n",
45311diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
45312index e3abebc9..6a35328 100644
45313--- a/drivers/lguest/page_tables.c
45314+++ b/drivers/lguest/page_tables.c
45315@@ -585,7 +585,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
45316 /*:*/
45317
45318 #ifdef CONFIG_X86_PAE
45319-static void release_pmd(pmd_t *spmd)
45320+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
45321 {
45322 /* If the entry's not present, there's nothing to release. */
45323 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
45324diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
45325index 30f2aef..391c748 100644
45326--- a/drivers/lguest/x86/core.c
45327+++ b/drivers/lguest/x86/core.c
45328@@ -60,7 +60,7 @@ static struct {
45329 /* Offset from where switcher.S was compiled to where we've copied it */
45330 static unsigned long switcher_offset(void)
45331 {
45332- return switcher_addr - (unsigned long)start_switcher_text;
45333+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
45334 }
45335
45336 /* This cpu's struct lguest_pages (after the Switcher text page) */
45337@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
45338 * These copies are pretty cheap, so we do them unconditionally: */
45339 /* Save the current Host top-level page directory.
45340 */
45341+
45342+#ifdef CONFIG_PAX_PER_CPU_PGD
45343+ pages->state.host_cr3 = read_cr3();
45344+#else
45345 pages->state.host_cr3 = __pa(current->mm->pgd);
45346+#endif
45347+
45348 /*
45349 * Set up the Guest's page tables to see this CPU's pages (and no
45350 * other CPU's pages).
45351@@ -494,7 +500,7 @@ void __init lguest_arch_host_init(void)
45352 * compiled-in switcher code and the high-mapped copy we just made.
45353 */
45354 for (i = 0; i < IDT_ENTRIES; i++)
45355- default_idt_entries[i] += switcher_offset();
45356+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
45357
45358 /*
45359 * Set up the Switcher's per-cpu areas.
45360@@ -577,7 +583,7 @@ void __init lguest_arch_host_init(void)
45361 * it will be undisturbed when we switch. To change %cs and jump we
45362 * need this structure to feed to Intel's "lcall" instruction.
45363 */
45364- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
45365+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
45366 lguest_entry.segment = LGUEST_CS;
45367
45368 /*
45369diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
45370index 40634b0..4f5855e 100644
45371--- a/drivers/lguest/x86/switcher_32.S
45372+++ b/drivers/lguest/x86/switcher_32.S
45373@@ -87,6 +87,7 @@
45374 #include <asm/page.h>
45375 #include <asm/segment.h>
45376 #include <asm/lguest.h>
45377+#include <asm/processor-flags.h>
45378
45379 // We mark the start of the code to copy
45380 // It's placed in .text tho it's never run here
45381@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
45382 // Changes type when we load it: damn Intel!
45383 // For after we switch over our page tables
45384 // That entry will be read-only: we'd crash.
45385+
45386+#ifdef CONFIG_PAX_KERNEXEC
45387+ mov %cr0, %edx
45388+ xor $X86_CR0_WP, %edx
45389+ mov %edx, %cr0
45390+#endif
45391+
45392 movl $(GDT_ENTRY_TSS*8), %edx
45393 ltr %dx
45394
45395@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
45396 // Let's clear it again for our return.
45397 // The GDT descriptor of the Host
45398 // Points to the table after two "size" bytes
45399- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
45400+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
45401 // Clear "used" from type field (byte 5, bit 2)
45402- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
45403+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
45404+
45405+#ifdef CONFIG_PAX_KERNEXEC
45406+ mov %cr0, %eax
45407+ xor $X86_CR0_WP, %eax
45408+ mov %eax, %cr0
45409+#endif
45410
45411 // Once our page table's switched, the Guest is live!
45412 // The Host fades as we run this final step.
45413@@ -295,13 +309,12 @@ deliver_to_host:
45414 // I consulted gcc, and it gave
45415 // These instructions, which I gladly credit:
45416 leal (%edx,%ebx,8), %eax
45417- movzwl (%eax),%edx
45418- movl 4(%eax), %eax
45419- xorw %ax, %ax
45420- orl %eax, %edx
45421+ movl 4(%eax), %edx
45422+ movw (%eax), %dx
45423 // Now the address of the handler's in %edx
45424 // We call it now: its "iret" drops us home.
45425- jmp *%edx
45426+ ljmp $__KERNEL_CS, $1f
45427+1: jmp *%edx
45428
45429 // Every interrupt can come to us here
45430 // But we must truly tell each apart.
45431diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
45432index a08e3ee..df8ade2 100644
45433--- a/drivers/md/bcache/closure.h
45434+++ b/drivers/md/bcache/closure.h
45435@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
45436 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
45437 struct workqueue_struct *wq)
45438 {
45439- BUG_ON(object_is_on_stack(cl));
45440+ BUG_ON(object_starts_on_stack(cl));
45441 closure_set_ip(cl);
45442 cl->fn = fn;
45443 cl->wq = wq;
45444diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
45445index 3a57679..c58cdaf 100644
45446--- a/drivers/md/bitmap.c
45447+++ b/drivers/md/bitmap.c
45448@@ -1786,7 +1786,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
45449 chunk_kb ? "KB" : "B");
45450 if (bitmap->storage.file) {
45451 seq_printf(seq, ", file: ");
45452- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
45453+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
45454 }
45455
45456 seq_printf(seq, "\n");
45457diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
45458index c8a18e4..0ab43e5 100644
45459--- a/drivers/md/dm-ioctl.c
45460+++ b/drivers/md/dm-ioctl.c
45461@@ -1772,7 +1772,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
45462 cmd == DM_LIST_VERSIONS_CMD)
45463 return 0;
45464
45465- if ((cmd == DM_DEV_CREATE_CMD)) {
45466+ if (cmd == DM_DEV_CREATE_CMD) {
45467 if (!*param->name) {
45468 DMWARN("name not supplied when creating device");
45469 return -EINVAL;
45470diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
45471index 089d627..ef7352e 100644
45472--- a/drivers/md/dm-raid1.c
45473+++ b/drivers/md/dm-raid1.c
45474@@ -40,7 +40,7 @@ enum dm_raid1_error {
45475
45476 struct mirror {
45477 struct mirror_set *ms;
45478- atomic_t error_count;
45479+ atomic_unchecked_t error_count;
45480 unsigned long error_type;
45481 struct dm_dev *dev;
45482 sector_t offset;
45483@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
45484 struct mirror *m;
45485
45486 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
45487- if (!atomic_read(&m->error_count))
45488+ if (!atomic_read_unchecked(&m->error_count))
45489 return m;
45490
45491 return NULL;
45492@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
45493 * simple way to tell if a device has encountered
45494 * errors.
45495 */
45496- atomic_inc(&m->error_count);
45497+ atomic_inc_unchecked(&m->error_count);
45498
45499 if (test_and_set_bit(error_type, &m->error_type))
45500 return;
45501@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
45502 struct mirror *m = get_default_mirror(ms);
45503
45504 do {
45505- if (likely(!atomic_read(&m->error_count)))
45506+ if (likely(!atomic_read_unchecked(&m->error_count)))
45507 return m;
45508
45509 if (m-- == ms->mirror)
45510@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
45511 {
45512 struct mirror *default_mirror = get_default_mirror(m->ms);
45513
45514- return !atomic_read(&default_mirror->error_count);
45515+ return !atomic_read_unchecked(&default_mirror->error_count);
45516 }
45517
45518 static int mirror_available(struct mirror_set *ms, struct bio *bio)
45519@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
45520 */
45521 if (likely(region_in_sync(ms, region, 1)))
45522 m = choose_mirror(ms, bio->bi_iter.bi_sector);
45523- else if (m && atomic_read(&m->error_count))
45524+ else if (m && atomic_read_unchecked(&m->error_count))
45525 m = NULL;
45526
45527 if (likely(m))
45528@@ -936,7 +936,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
45529 }
45530
45531 ms->mirror[mirror].ms = ms;
45532- atomic_set(&(ms->mirror[mirror].error_count), 0);
45533+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
45534 ms->mirror[mirror].error_type = 0;
45535 ms->mirror[mirror].offset = offset;
45536
45537@@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
45538 */
45539 static char device_status_char(struct mirror *m)
45540 {
45541- if (!atomic_read(&(m->error_count)))
45542+ if (!atomic_read_unchecked(&(m->error_count)))
45543 return 'A';
45544
45545 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
45546diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
45547index f478a4c..4b8e5ef 100644
45548--- a/drivers/md/dm-stats.c
45549+++ b/drivers/md/dm-stats.c
45550@@ -382,7 +382,7 @@ do_sync_free:
45551 synchronize_rcu_expedited();
45552 dm_stat_free(&s->rcu_head);
45553 } else {
45554- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
45555+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
45556 call_rcu(&s->rcu_head, dm_stat_free);
45557 }
45558 return 0;
45559@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
45560 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
45561 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
45562 ));
45563- ACCESS_ONCE(last->last_sector) = end_sector;
45564- ACCESS_ONCE(last->last_rw) = bi_rw;
45565+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
45566+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
45567 }
45568
45569 rcu_read_lock();
45570diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
45571index f8b37d4..5c5cafd 100644
45572--- a/drivers/md/dm-stripe.c
45573+++ b/drivers/md/dm-stripe.c
45574@@ -21,7 +21,7 @@ struct stripe {
45575 struct dm_dev *dev;
45576 sector_t physical_start;
45577
45578- atomic_t error_count;
45579+ atomic_unchecked_t error_count;
45580 };
45581
45582 struct stripe_c {
45583@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
45584 kfree(sc);
45585 return r;
45586 }
45587- atomic_set(&(sc->stripe[i].error_count), 0);
45588+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
45589 }
45590
45591 ti->private = sc;
45592@@ -332,7 +332,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
45593 DMEMIT("%d ", sc->stripes);
45594 for (i = 0; i < sc->stripes; i++) {
45595 DMEMIT("%s ", sc->stripe[i].dev->name);
45596- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
45597+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
45598 'D' : 'A';
45599 }
45600 buffer[i] = '\0';
45601@@ -377,8 +377,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
45602 */
45603 for (i = 0; i < sc->stripes; i++)
45604 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
45605- atomic_inc(&(sc->stripe[i].error_count));
45606- if (atomic_read(&(sc->stripe[i].error_count)) <
45607+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
45608+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
45609 DM_IO_ERROR_THRESHOLD)
45610 schedule_work(&sc->trigger_event);
45611 }
45612diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
45613index 757f1ba..bf9ec8f 100644
45614--- a/drivers/md/dm-table.c
45615+++ b/drivers/md/dm-table.c
45616@@ -303,7 +303,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
45617 if (!dev_size)
45618 return 0;
45619
45620- if ((start >= dev_size) || (start + len > dev_size)) {
45621+ if ((start >= dev_size) || (len > dev_size - start)) {
45622 DMWARN("%s: %s too small for target: "
45623 "start=%llu, len=%llu, dev_size=%llu",
45624 dm_device_name(ti->table->md), bdevname(bdev, b),
45625diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
45626index 79f6941..b33b4e0 100644
45627--- a/drivers/md/dm-thin-metadata.c
45628+++ b/drivers/md/dm-thin-metadata.c
45629@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
45630 {
45631 pmd->info.tm = pmd->tm;
45632 pmd->info.levels = 2;
45633- pmd->info.value_type.context = pmd->data_sm;
45634+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
45635 pmd->info.value_type.size = sizeof(__le64);
45636 pmd->info.value_type.inc = data_block_inc;
45637 pmd->info.value_type.dec = data_block_dec;
45638@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
45639
45640 pmd->bl_info.tm = pmd->tm;
45641 pmd->bl_info.levels = 1;
45642- pmd->bl_info.value_type.context = pmd->data_sm;
45643+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
45644 pmd->bl_info.value_type.size = sizeof(__le64);
45645 pmd->bl_info.value_type.inc = data_block_inc;
45646 pmd->bl_info.value_type.dec = data_block_dec;
45647diff --git a/drivers/md/dm.c b/drivers/md/dm.c
45648index beda011..de57372 100644
45649--- a/drivers/md/dm.c
45650+++ b/drivers/md/dm.c
45651@@ -188,9 +188,9 @@ struct mapped_device {
45652 /*
45653 * Event handling.
45654 */
45655- atomic_t event_nr;
45656+ atomic_unchecked_t event_nr;
45657 wait_queue_head_t eventq;
45658- atomic_t uevent_seq;
45659+ atomic_unchecked_t uevent_seq;
45660 struct list_head uevent_list;
45661 spinlock_t uevent_lock; /* Protect access to uevent_list */
45662
45663@@ -2170,8 +2170,8 @@ static struct mapped_device *alloc_dev(int minor)
45664 spin_lock_init(&md->deferred_lock);
45665 atomic_set(&md->holders, 1);
45666 atomic_set(&md->open_count, 0);
45667- atomic_set(&md->event_nr, 0);
45668- atomic_set(&md->uevent_seq, 0);
45669+ atomic_set_unchecked(&md->event_nr, 0);
45670+ atomic_set_unchecked(&md->uevent_seq, 0);
45671 INIT_LIST_HEAD(&md->uevent_list);
45672 INIT_LIST_HEAD(&md->table_devices);
45673 spin_lock_init(&md->uevent_lock);
45674@@ -2336,7 +2336,7 @@ static void event_callback(void *context)
45675
45676 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
45677
45678- atomic_inc(&md->event_nr);
45679+ atomic_inc_unchecked(&md->event_nr);
45680 wake_up(&md->eventq);
45681 }
45682
45683@@ -3182,18 +3182,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
45684
45685 uint32_t dm_next_uevent_seq(struct mapped_device *md)
45686 {
45687- return atomic_add_return(1, &md->uevent_seq);
45688+ return atomic_add_return_unchecked(1, &md->uevent_seq);
45689 }
45690
45691 uint32_t dm_get_event_nr(struct mapped_device *md)
45692 {
45693- return atomic_read(&md->event_nr);
45694+ return atomic_read_unchecked(&md->event_nr);
45695 }
45696
45697 int dm_wait_event(struct mapped_device *md, int event_nr)
45698 {
45699 return wait_event_interruptible(md->eventq,
45700- (event_nr != atomic_read(&md->event_nr)));
45701+ (event_nr != atomic_read_unchecked(&md->event_nr)));
45702 }
45703
45704 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
45705diff --git a/drivers/md/md.c b/drivers/md/md.c
45706index b7bf8ee..ee17152 100644
45707--- a/drivers/md/md.c
45708+++ b/drivers/md/md.c
45709@@ -191,10 +191,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
45710 * start build, activate spare
45711 */
45712 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
45713-static atomic_t md_event_count;
45714+static atomic_unchecked_t md_event_count;
45715 void md_new_event(struct mddev *mddev)
45716 {
45717- atomic_inc(&md_event_count);
45718+ atomic_inc_unchecked(&md_event_count);
45719 wake_up(&md_event_waiters);
45720 }
45721 EXPORT_SYMBOL_GPL(md_new_event);
45722@@ -204,7 +204,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
45723 */
45724 static void md_new_event_inintr(struct mddev *mddev)
45725 {
45726- atomic_inc(&md_event_count);
45727+ atomic_inc_unchecked(&md_event_count);
45728 wake_up(&md_event_waiters);
45729 }
45730
45731@@ -1442,7 +1442,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
45732 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
45733 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
45734 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
45735- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
45736+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
45737
45738 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
45739 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
45740@@ -1693,7 +1693,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
45741 else
45742 sb->resync_offset = cpu_to_le64(0);
45743
45744- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
45745+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
45746
45747 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
45748 sb->size = cpu_to_le64(mddev->dev_sectors);
45749@@ -2564,7 +2564,7 @@ __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
45750 static ssize_t
45751 errors_show(struct md_rdev *rdev, char *page)
45752 {
45753- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
45754+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
45755 }
45756
45757 static ssize_t
45758@@ -2573,7 +2573,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
45759 char *e;
45760 unsigned long n = simple_strtoul(buf, &e, 10);
45761 if (*buf && (*e == 0 || *e == '\n')) {
45762- atomic_set(&rdev->corrected_errors, n);
45763+ atomic_set_unchecked(&rdev->corrected_errors, n);
45764 return len;
45765 }
45766 return -EINVAL;
45767@@ -3009,8 +3009,8 @@ int md_rdev_init(struct md_rdev *rdev)
45768 rdev->sb_loaded = 0;
45769 rdev->bb_page = NULL;
45770 atomic_set(&rdev->nr_pending, 0);
45771- atomic_set(&rdev->read_errors, 0);
45772- atomic_set(&rdev->corrected_errors, 0);
45773+ atomic_set_unchecked(&rdev->read_errors, 0);
45774+ atomic_set_unchecked(&rdev->corrected_errors, 0);
45775
45776 INIT_LIST_HEAD(&rdev->same_set);
45777 init_waitqueue_head(&rdev->blocked_wait);
45778@@ -7086,7 +7086,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
45779
45780 spin_unlock(&pers_lock);
45781 seq_printf(seq, "\n");
45782- seq->poll_event = atomic_read(&md_event_count);
45783+ seq->poll_event = atomic_read_unchecked(&md_event_count);
45784 return 0;
45785 }
45786 if (v == (void*)2) {
45787@@ -7189,7 +7189,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
45788 return error;
45789
45790 seq = file->private_data;
45791- seq->poll_event = atomic_read(&md_event_count);
45792+ seq->poll_event = atomic_read_unchecked(&md_event_count);
45793 return error;
45794 }
45795
45796@@ -7206,7 +7206,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
45797 /* always allow read */
45798 mask = POLLIN | POLLRDNORM;
45799
45800- if (seq->poll_event != atomic_read(&md_event_count))
45801+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
45802 mask |= POLLERR | POLLPRI;
45803 return mask;
45804 }
45805@@ -7253,7 +7253,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
45806 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
45807 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
45808 (int)part_stat_read(&disk->part0, sectors[1]) -
45809- atomic_read(&disk->sync_io);
45810+ atomic_read_unchecked(&disk->sync_io);
45811 /* sync IO will cause sync_io to increase before the disk_stats
45812 * as sync_io is counted when a request starts, and
45813 * disk_stats is counted when it completes.
45814diff --git a/drivers/md/md.h b/drivers/md/md.h
45815index 318ca8f..31e4478 100644
45816--- a/drivers/md/md.h
45817+++ b/drivers/md/md.h
45818@@ -94,13 +94,13 @@ struct md_rdev {
45819 * only maintained for arrays that
45820 * support hot removal
45821 */
45822- atomic_t read_errors; /* number of consecutive read errors that
45823+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
45824 * we have tried to ignore.
45825 */
45826 struct timespec last_read_error; /* monotonic time since our
45827 * last read error
45828 */
45829- atomic_t corrected_errors; /* number of corrected read errors,
45830+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
45831 * for reporting to userspace and storing
45832 * in superblock.
45833 */
45834@@ -476,7 +476,7 @@ extern void mddev_unlock(struct mddev *mddev);
45835
45836 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
45837 {
45838- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
45839+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
45840 }
45841
45842 struct md_personality
45843diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
45844index e8a9042..35bd145 100644
45845--- a/drivers/md/persistent-data/dm-space-map-metadata.c
45846+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
45847@@ -683,7 +683,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
45848 * Flick into a mode where all blocks get allocated in the new area.
45849 */
45850 smm->begin = old_len;
45851- memcpy(sm, &bootstrap_ops, sizeof(*sm));
45852+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
45853
45854 /*
45855 * Extend.
45856@@ -714,7 +714,7 @@ out:
45857 /*
45858 * Switch back to normal behaviour.
45859 */
45860- memcpy(sm, &ops, sizeof(*sm));
45861+ memcpy((void *)sm, &ops, sizeof(*sm));
45862 return r;
45863 }
45864
45865diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
45866index 3e6d115..ffecdeb 100644
45867--- a/drivers/md/persistent-data/dm-space-map.h
45868+++ b/drivers/md/persistent-data/dm-space-map.h
45869@@ -71,6 +71,7 @@ struct dm_space_map {
45870 dm_sm_threshold_fn fn,
45871 void *context);
45872 };
45873+typedef struct dm_space_map __no_const dm_space_map_no_const;
45874
45875 /*----------------------------------------------------------------*/
45876
45877diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
45878index d34e238..34f8d98 100644
45879--- a/drivers/md/raid1.c
45880+++ b/drivers/md/raid1.c
45881@@ -1922,7 +1922,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
45882 if (r1_sync_page_io(rdev, sect, s,
45883 bio->bi_io_vec[idx].bv_page,
45884 READ) != 0)
45885- atomic_add(s, &rdev->corrected_errors);
45886+ atomic_add_unchecked(s, &rdev->corrected_errors);
45887 }
45888 sectors -= s;
45889 sect += s;
45890@@ -2155,7 +2155,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
45891 !test_bit(Faulty, &rdev->flags)) {
45892 if (r1_sync_page_io(rdev, sect, s,
45893 conf->tmppage, READ)) {
45894- atomic_add(s, &rdev->corrected_errors);
45895+ atomic_add_unchecked(s, &rdev->corrected_errors);
45896 printk(KERN_INFO
45897 "md/raid1:%s: read error corrected "
45898 "(%d sectors at %llu on %s)\n",
45899diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
45900index a7196c4..439f012 100644
45901--- a/drivers/md/raid10.c
45902+++ b/drivers/md/raid10.c
45903@@ -1934,7 +1934,7 @@ static void end_sync_read(struct bio *bio, int error)
45904 /* The write handler will notice the lack of
45905 * R10BIO_Uptodate and record any errors etc
45906 */
45907- atomic_add(r10_bio->sectors,
45908+ atomic_add_unchecked(r10_bio->sectors,
45909 &conf->mirrors[d].rdev->corrected_errors);
45910
45911 /* for reconstruct, we always reschedule after a read.
45912@@ -2291,7 +2291,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
45913 {
45914 struct timespec cur_time_mon;
45915 unsigned long hours_since_last;
45916- unsigned int read_errors = atomic_read(&rdev->read_errors);
45917+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
45918
45919 ktime_get_ts(&cur_time_mon);
45920
45921@@ -2313,9 +2313,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
45922 * overflowing the shift of read_errors by hours_since_last.
45923 */
45924 if (hours_since_last >= 8 * sizeof(read_errors))
45925- atomic_set(&rdev->read_errors, 0);
45926+ atomic_set_unchecked(&rdev->read_errors, 0);
45927 else
45928- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
45929+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
45930 }
45931
45932 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
45933@@ -2369,8 +2369,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45934 return;
45935
45936 check_decay_read_errors(mddev, rdev);
45937- atomic_inc(&rdev->read_errors);
45938- if (atomic_read(&rdev->read_errors) > max_read_errors) {
45939+ atomic_inc_unchecked(&rdev->read_errors);
45940+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
45941 char b[BDEVNAME_SIZE];
45942 bdevname(rdev->bdev, b);
45943
45944@@ -2378,7 +2378,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45945 "md/raid10:%s: %s: Raid device exceeded "
45946 "read_error threshold [cur %d:max %d]\n",
45947 mdname(mddev), b,
45948- atomic_read(&rdev->read_errors), max_read_errors);
45949+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
45950 printk(KERN_NOTICE
45951 "md/raid10:%s: %s: Failing raid device\n",
45952 mdname(mddev), b);
45953@@ -2533,7 +2533,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45954 sect +
45955 choose_data_offset(r10_bio, rdev)),
45956 bdevname(rdev->bdev, b));
45957- atomic_add(s, &rdev->corrected_errors);
45958+ atomic_add_unchecked(s, &rdev->corrected_errors);
45959 }
45960
45961 rdev_dec_pending(rdev, mddev);
45962diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
45963index 007ab86..d11593d 100644
45964--- a/drivers/md/raid5.c
45965+++ b/drivers/md/raid5.c
45966@@ -947,23 +947,23 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
45967 struct bio_vec bvl;
45968 struct bvec_iter iter;
45969 struct page *bio_page;
45970- int page_offset;
45971+ s64 page_offset;
45972 struct async_submit_ctl submit;
45973 enum async_tx_flags flags = 0;
45974
45975 if (bio->bi_iter.bi_sector >= sector)
45976- page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
45977+ page_offset = (s64)(bio->bi_iter.bi_sector - sector) * 512;
45978 else
45979- page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
45980+ page_offset = (s64)(sector - bio->bi_iter.bi_sector) * -512;
45981
45982 if (frombio)
45983 flags |= ASYNC_TX_FENCE;
45984 init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
45985
45986 bio_for_each_segment(bvl, bio, iter) {
45987- int len = bvl.bv_len;
45988- int clen;
45989- int b_offset = 0;
45990+ s64 len = bvl.bv_len;
45991+ s64 clen;
45992+ s64 b_offset = 0;
45993
45994 if (page_offset < 0) {
45995 b_offset = -page_offset;
45996@@ -1727,6 +1727,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
45997 return 1;
45998 }
45999
46000+#ifdef CONFIG_GRKERNSEC_HIDESYM
46001+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
46002+#endif
46003+
46004 static int grow_stripes(struct r5conf *conf, int num)
46005 {
46006 struct kmem_cache *sc;
46007@@ -1738,7 +1742,11 @@ static int grow_stripes(struct r5conf *conf, int num)
46008 "raid%d-%s", conf->level, mdname(conf->mddev));
46009 else
46010 sprintf(conf->cache_name[0],
46011+#ifdef CONFIG_GRKERNSEC_HIDESYM
46012+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
46013+#else
46014 "raid%d-%p", conf->level, conf->mddev);
46015+#endif
46016 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
46017
46018 conf->active_name = 0;
46019@@ -2015,21 +2023,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
46020 mdname(conf->mddev), STRIPE_SECTORS,
46021 (unsigned long long)s,
46022 bdevname(rdev->bdev, b));
46023- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
46024+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
46025 clear_bit(R5_ReadError, &sh->dev[i].flags);
46026 clear_bit(R5_ReWrite, &sh->dev[i].flags);
46027 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
46028 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
46029
46030- if (atomic_read(&rdev->read_errors))
46031- atomic_set(&rdev->read_errors, 0);
46032+ if (atomic_read_unchecked(&rdev->read_errors))
46033+ atomic_set_unchecked(&rdev->read_errors, 0);
46034 } else {
46035 const char *bdn = bdevname(rdev->bdev, b);
46036 int retry = 0;
46037 int set_bad = 0;
46038
46039 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
46040- atomic_inc(&rdev->read_errors);
46041+ atomic_inc_unchecked(&rdev->read_errors);
46042 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
46043 printk_ratelimited(
46044 KERN_WARNING
46045@@ -2057,7 +2065,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
46046 mdname(conf->mddev),
46047 (unsigned long long)s,
46048 bdn);
46049- } else if (atomic_read(&rdev->read_errors)
46050+ } else if (atomic_read_unchecked(&rdev->read_errors)
46051 > conf->max_nr_stripes)
46052 printk(KERN_WARNING
46053 "md/raid:%s: Too many read errors, failing device %s.\n",
46054diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
46055index 983db75..ef9248c 100644
46056--- a/drivers/media/dvb-core/dvbdev.c
46057+++ b/drivers/media/dvb-core/dvbdev.c
46058@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
46059 const struct dvb_device *template, void *priv, int type)
46060 {
46061 struct dvb_device *dvbdev;
46062- struct file_operations *dvbdevfops;
46063+ file_operations_no_const *dvbdevfops;
46064 struct device *clsdev;
46065 int minor;
46066 int id;
46067diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
46068index 6ad22b6..6e90e2a 100644
46069--- a/drivers/media/dvb-frontends/af9033.h
46070+++ b/drivers/media/dvb-frontends/af9033.h
46071@@ -96,6 +96,6 @@ struct af9033_ops {
46072 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
46073 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
46074 int onoff);
46075-};
46076+} __no_const;
46077
46078 #endif /* AF9033_H */
46079diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
46080index 9b6c3bb..baeb5c7 100644
46081--- a/drivers/media/dvb-frontends/dib3000.h
46082+++ b/drivers/media/dvb-frontends/dib3000.h
46083@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
46084 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
46085 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
46086 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
46087-};
46088+} __no_const;
46089
46090 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
46091 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
46092diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
46093index 1fea0e9..321ce8f 100644
46094--- a/drivers/media/dvb-frontends/dib7000p.h
46095+++ b/drivers/media/dvb-frontends/dib7000p.h
46096@@ -64,7 +64,7 @@ struct dib7000p_ops {
46097 int (*get_adc_power)(struct dvb_frontend *fe);
46098 int (*slave_reset)(struct dvb_frontend *fe);
46099 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
46100-};
46101+} __no_const;
46102
46103 #if IS_ENABLED(CONFIG_DVB_DIB7000P)
46104 void *dib7000p_attach(struct dib7000p_ops *ops);
46105diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
46106index 84cc103..5780c54 100644
46107--- a/drivers/media/dvb-frontends/dib8000.h
46108+++ b/drivers/media/dvb-frontends/dib8000.h
46109@@ -61,7 +61,7 @@ struct dib8000_ops {
46110 int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff);
46111 int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
46112 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
46113-};
46114+} __no_const;
46115
46116 #if IS_ENABLED(CONFIG_DVB_DIB8000)
46117 void *dib8000_attach(struct dib8000_ops *ops);
46118diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
46119index 860c98fc..497fa25 100644
46120--- a/drivers/media/pci/cx88/cx88-video.c
46121+++ b/drivers/media/pci/cx88/cx88-video.c
46122@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
46123
46124 /* ------------------------------------------------------------------ */
46125
46126-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46127-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46128-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46129+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46130+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46131+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
46132
46133 module_param_array(video_nr, int, NULL, 0444);
46134 module_param_array(vbi_nr, int, NULL, 0444);
46135diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
46136index 802642d..5534900 100644
46137--- a/drivers/media/pci/ivtv/ivtv-driver.c
46138+++ b/drivers/media/pci/ivtv/ivtv-driver.c
46139@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
46140 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
46141
46142 /* ivtv instance counter */
46143-static atomic_t ivtv_instance = ATOMIC_INIT(0);
46144+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
46145
46146 /* Parameter declarations */
46147 static int cardtype[IVTV_MAX_CARDS];
46148diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
46149index 570d119..ed25830 100644
46150--- a/drivers/media/pci/solo6x10/solo6x10-core.c
46151+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
46152@@ -424,7 +424,7 @@ static void solo_device_release(struct device *dev)
46153
46154 static int solo_sysfs_init(struct solo_dev *solo_dev)
46155 {
46156- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
46157+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
46158 struct device *dev = &solo_dev->dev;
46159 const char *driver;
46160 int i;
46161diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
46162index 7ddc767..1c24361 100644
46163--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
46164+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
46165@@ -351,7 +351,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
46166
46167 int solo_g723_init(struct solo_dev *solo_dev)
46168 {
46169- static struct snd_device_ops ops = { NULL };
46170+ static struct snd_device_ops ops = { };
46171 struct snd_card *card;
46172 struct snd_kcontrol_new kctl;
46173 char name[32];
46174diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
46175index 8c84846..27b4f83 100644
46176--- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
46177+++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
46178@@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
46179
46180 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
46181 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
46182- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
46183+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
46184 if (p2m_id < 0)
46185 p2m_id = -p2m_id;
46186 }
46187diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
46188index 1ca54b0..7d7cb9a 100644
46189--- a/drivers/media/pci/solo6x10/solo6x10.h
46190+++ b/drivers/media/pci/solo6x10/solo6x10.h
46191@@ -218,7 +218,7 @@ struct solo_dev {
46192
46193 /* P2M DMA Engine */
46194 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
46195- atomic_t p2m_count;
46196+ atomic_unchecked_t p2m_count;
46197 int p2m_jiffies;
46198 unsigned int p2m_timeouts;
46199
46200diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
46201index c135165..dc69499 100644
46202--- a/drivers/media/pci/tw68/tw68-core.c
46203+++ b/drivers/media/pci/tw68/tw68-core.c
46204@@ -60,7 +60,7 @@ static unsigned int card[] = {[0 ... (TW68_MAXBOARDS - 1)] = UNSET };
46205 module_param_array(card, int, NULL, 0444);
46206 MODULE_PARM_DESC(card, "card type");
46207
46208-static atomic_t tw68_instance = ATOMIC_INIT(0);
46209+static atomic_unchecked_t tw68_instance = ATOMIC_INIT(0);
46210
46211 /* ------------------------------------------------------------------ */
46212
46213diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
46214index ba2d8f9..1566684 100644
46215--- a/drivers/media/platform/omap/omap_vout.c
46216+++ b/drivers/media/platform/omap/omap_vout.c
46217@@ -63,7 +63,6 @@ enum omap_vout_channels {
46218 OMAP_VIDEO2,
46219 };
46220
46221-static struct videobuf_queue_ops video_vbq_ops;
46222 /* Variables configurable through module params*/
46223 static u32 video1_numbuffers = 3;
46224 static u32 video2_numbuffers = 3;
46225@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
46226 {
46227 struct videobuf_queue *q;
46228 struct omap_vout_device *vout = NULL;
46229+ static struct videobuf_queue_ops video_vbq_ops = {
46230+ .buf_setup = omap_vout_buffer_setup,
46231+ .buf_prepare = omap_vout_buffer_prepare,
46232+ .buf_release = omap_vout_buffer_release,
46233+ .buf_queue = omap_vout_buffer_queue,
46234+ };
46235
46236 vout = video_drvdata(file);
46237 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
46238@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
46239 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
46240
46241 q = &vout->vbq;
46242- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
46243- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
46244- video_vbq_ops.buf_release = omap_vout_buffer_release;
46245- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
46246 spin_lock_init(&vout->vbq_lock);
46247
46248 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
46249diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
46250index fb2acc5..a2fcbdc4 100644
46251--- a/drivers/media/platform/s5p-tv/mixer.h
46252+++ b/drivers/media/platform/s5p-tv/mixer.h
46253@@ -156,7 +156,7 @@ struct mxr_layer {
46254 /** layer index (unique identifier) */
46255 int idx;
46256 /** callbacks for layer methods */
46257- struct mxr_layer_ops ops;
46258+ struct mxr_layer_ops *ops;
46259 /** format array */
46260 const struct mxr_format **fmt_array;
46261 /** size of format array */
46262diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46263index 74344c7..a39e70e 100644
46264--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46265+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
46266@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
46267 {
46268 struct mxr_layer *layer;
46269 int ret;
46270- struct mxr_layer_ops ops = {
46271+ static struct mxr_layer_ops ops = {
46272 .release = mxr_graph_layer_release,
46273 .buffer_set = mxr_graph_buffer_set,
46274 .stream_set = mxr_graph_stream_set,
46275diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
46276index b713403..53cb5ad 100644
46277--- a/drivers/media/platform/s5p-tv/mixer_reg.c
46278+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
46279@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
46280 layer->update_buf = next;
46281 }
46282
46283- layer->ops.buffer_set(layer, layer->update_buf);
46284+ layer->ops->buffer_set(layer, layer->update_buf);
46285
46286 if (done && done != layer->shadow_buf)
46287 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
46288diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
46289index 72d4f2e..4b2ea0d 100644
46290--- a/drivers/media/platform/s5p-tv/mixer_video.c
46291+++ b/drivers/media/platform/s5p-tv/mixer_video.c
46292@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
46293 layer->geo.src.height = layer->geo.src.full_height;
46294
46295 mxr_geometry_dump(mdev, &layer->geo);
46296- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46297+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46298 mxr_geometry_dump(mdev, &layer->geo);
46299 }
46300
46301@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
46302 layer->geo.dst.full_width = mbus_fmt.width;
46303 layer->geo.dst.full_height = mbus_fmt.height;
46304 layer->geo.dst.field = mbus_fmt.field;
46305- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46306+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
46307
46308 mxr_geometry_dump(mdev, &layer->geo);
46309 }
46310@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
46311 /* set source size to highest accepted value */
46312 geo->src.full_width = max(geo->dst.full_width, pix->width);
46313 geo->src.full_height = max(geo->dst.full_height, pix->height);
46314- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46315+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46316 mxr_geometry_dump(mdev, &layer->geo);
46317 /* set cropping to total visible screen */
46318 geo->src.width = pix->width;
46319@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
46320 geo->src.x_offset = 0;
46321 geo->src.y_offset = 0;
46322 /* assure consistency of geometry */
46323- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46324+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
46325 mxr_geometry_dump(mdev, &layer->geo);
46326 /* set full size to lowest possible value */
46327 geo->src.full_width = 0;
46328 geo->src.full_height = 0;
46329- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46330+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
46331 mxr_geometry_dump(mdev, &layer->geo);
46332
46333 /* returning results */
46334@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
46335 target->width = s->r.width;
46336 target->height = s->r.height;
46337
46338- layer->ops.fix_geometry(layer, stage, s->flags);
46339+ layer->ops->fix_geometry(layer, stage, s->flags);
46340
46341 /* retrieve update selection rectangle */
46342 res.left = target->x_offset;
46343@@ -938,13 +938,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
46344 mxr_output_get(mdev);
46345
46346 mxr_layer_update_output(layer);
46347- layer->ops.format_set(layer);
46348+ layer->ops->format_set(layer);
46349 /* enabling layer in hardware */
46350 spin_lock_irqsave(&layer->enq_slock, flags);
46351 layer->state = MXR_LAYER_STREAMING;
46352 spin_unlock_irqrestore(&layer->enq_slock, flags);
46353
46354- layer->ops.stream_set(layer, MXR_ENABLE);
46355+ layer->ops->stream_set(layer, MXR_ENABLE);
46356 mxr_streamer_get(mdev);
46357
46358 return 0;
46359@@ -1014,7 +1014,7 @@ static void stop_streaming(struct vb2_queue *vq)
46360 spin_unlock_irqrestore(&layer->enq_slock, flags);
46361
46362 /* disabling layer in hardware */
46363- layer->ops.stream_set(layer, MXR_DISABLE);
46364+ layer->ops->stream_set(layer, MXR_DISABLE);
46365 /* remove one streamer */
46366 mxr_streamer_put(mdev);
46367 /* allow changes in output configuration */
46368@@ -1052,8 +1052,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
46369
46370 void mxr_layer_release(struct mxr_layer *layer)
46371 {
46372- if (layer->ops.release)
46373- layer->ops.release(layer);
46374+ if (layer->ops->release)
46375+ layer->ops->release(layer);
46376 }
46377
46378 void mxr_base_layer_release(struct mxr_layer *layer)
46379@@ -1079,7 +1079,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
46380
46381 layer->mdev = mdev;
46382 layer->idx = idx;
46383- layer->ops = *ops;
46384+ layer->ops = ops;
46385
46386 spin_lock_init(&layer->enq_slock);
46387 INIT_LIST_HEAD(&layer->enq_list);
46388diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46389index c9388c4..ce71ece 100644
46390--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46391+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
46392@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
46393 {
46394 struct mxr_layer *layer;
46395 int ret;
46396- struct mxr_layer_ops ops = {
46397+ static struct mxr_layer_ops ops = {
46398 .release = mxr_vp_layer_release,
46399 .buffer_set = mxr_vp_buffer_set,
46400 .stream_set = mxr_vp_stream_set,
46401diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
46402index 82affae..42833ec 100644
46403--- a/drivers/media/radio/radio-cadet.c
46404+++ b/drivers/media/radio/radio-cadet.c
46405@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46406 unsigned char readbuf[RDS_BUFFER];
46407 int i = 0;
46408
46409+ if (count > RDS_BUFFER)
46410+ return -EFAULT;
46411 mutex_lock(&dev->lock);
46412 if (dev->rdsstat == 0)
46413 cadet_start_rds(dev);
46414@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
46415 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
46416 mutex_unlock(&dev->lock);
46417
46418- if (i && copy_to_user(data, readbuf, i))
46419- return -EFAULT;
46420+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
46421+ i = -EFAULT;
46422+
46423 return i;
46424 }
46425
46426diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
46427index 5236035..c622c74 100644
46428--- a/drivers/media/radio/radio-maxiradio.c
46429+++ b/drivers/media/radio/radio-maxiradio.c
46430@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
46431 /* TEA5757 pin mappings */
46432 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
46433
46434-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
46435+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
46436
46437 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
46438 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
46439diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
46440index 050b3bb..79f62b9 100644
46441--- a/drivers/media/radio/radio-shark.c
46442+++ b/drivers/media/radio/radio-shark.c
46443@@ -79,7 +79,7 @@ struct shark_device {
46444 u32 last_val;
46445 };
46446
46447-static atomic_t shark_instance = ATOMIC_INIT(0);
46448+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46449
46450 static void shark_write_val(struct snd_tea575x *tea, u32 val)
46451 {
46452diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
46453index 8654e0d..0608a64 100644
46454--- a/drivers/media/radio/radio-shark2.c
46455+++ b/drivers/media/radio/radio-shark2.c
46456@@ -74,7 +74,7 @@ struct shark_device {
46457 u8 *transfer_buffer;
46458 };
46459
46460-static atomic_t shark_instance = ATOMIC_INIT(0);
46461+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
46462
46463 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
46464 {
46465diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
46466index dccf586..d5db411 100644
46467--- a/drivers/media/radio/radio-si476x.c
46468+++ b/drivers/media/radio/radio-si476x.c
46469@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
46470 struct si476x_radio *radio;
46471 struct v4l2_ctrl *ctrl;
46472
46473- static atomic_t instance = ATOMIC_INIT(0);
46474+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
46475
46476 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
46477 if (!radio)
46478diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
46479index 704397f..4d05977 100644
46480--- a/drivers/media/radio/wl128x/fmdrv_common.c
46481+++ b/drivers/media/radio/wl128x/fmdrv_common.c
46482@@ -71,7 +71,7 @@ module_param(default_rds_buf, uint, 0444);
46483 MODULE_PARM_DESC(rds_buf, "RDS buffer entries");
46484
46485 /* Radio Nr */
46486-static u32 radio_nr = -1;
46487+static int radio_nr = -1;
46488 module_param(radio_nr, int, 0444);
46489 MODULE_PARM_DESC(radio_nr, "Radio Nr");
46490
46491diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
46492index 9fd1527..8927230 100644
46493--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
46494+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
46495@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
46496
46497 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
46498 {
46499- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
46500- char result[64];
46501- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
46502- sizeof(result), 0);
46503+ char *buf;
46504+ char *result;
46505+ int retval;
46506+
46507+ buf = kmalloc(2, GFP_KERNEL);
46508+ if (buf == NULL)
46509+ return -ENOMEM;
46510+ result = kmalloc(64, GFP_KERNEL);
46511+ if (result == NULL) {
46512+ kfree(buf);
46513+ return -ENOMEM;
46514+ }
46515+
46516+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
46517+ buf[1] = enable ? 1 : 0;
46518+
46519+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
46520+
46521+ kfree(buf);
46522+ kfree(result);
46523+ return retval;
46524 }
46525
46526 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
46527 {
46528- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
46529- char state[3];
46530- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
46531+ char *buf;
46532+ char *state;
46533+ int retval;
46534+
46535+ buf = kmalloc(2, GFP_KERNEL);
46536+ if (buf == NULL)
46537+ return -ENOMEM;
46538+ state = kmalloc(3, GFP_KERNEL);
46539+ if (state == NULL) {
46540+ kfree(buf);
46541+ return -ENOMEM;
46542+ }
46543+
46544+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
46545+ buf[1] = enable ? 1 : 0;
46546+
46547+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
46548+
46549+ kfree(buf);
46550+ kfree(state);
46551+ return retval;
46552 }
46553
46554 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
46555 {
46556- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
46557- char state[3];
46558+ char *query;
46559+ char *state;
46560 int ret;
46561+ query = kmalloc(1, GFP_KERNEL);
46562+ if (query == NULL)
46563+ return -ENOMEM;
46564+ state = kmalloc(3, GFP_KERNEL);
46565+ if (state == NULL) {
46566+ kfree(query);
46567+ return -ENOMEM;
46568+ }
46569+
46570+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
46571
46572 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
46573
46574- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
46575- sizeof(state), 0);
46576+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
46577 if (ret < 0) {
46578 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
46579 "state info\n");
46580@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
46581
46582 /* Copy this pointer as we are gonna need it in the release phase */
46583 cinergyt2_usb_device = adap->dev;
46584-
46585+ kfree(query);
46586+ kfree(state);
46587 return 0;
46588 }
46589
46590@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
46591 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46592 {
46593 struct cinergyt2_state *st = d->priv;
46594- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
46595+ u8 *key, *cmd;
46596 int i;
46597
46598+ cmd = kmalloc(1, GFP_KERNEL);
46599+ if (cmd == NULL)
46600+ return -EINVAL;
46601+ key = kzalloc(5, GFP_KERNEL);
46602+ if (key == NULL) {
46603+ kfree(cmd);
46604+ return -EINVAL;
46605+ }
46606+
46607+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
46608+
46609 *state = REMOTE_NO_KEY_PRESSED;
46610
46611- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
46612+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
46613 if (key[4] == 0xff) {
46614 /* key repeat */
46615 st->rc_counter++;
46616@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46617 *event = d->last_event;
46618 deb_rc("repeat key, event %x\n",
46619 *event);
46620- return 0;
46621+ goto out;
46622 }
46623 }
46624 deb_rc("repeated key (non repeatable)\n");
46625 }
46626- return 0;
46627+ goto out;
46628 }
46629
46630 /* hack to pass checksum on the custom field */
46631@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
46632
46633 deb_rc("key: %*ph\n", 5, key);
46634 }
46635+out:
46636+ kfree(cmd);
46637+ kfree(key);
46638 return 0;
46639 }
46640
46641diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46642index c890fe4..f9b2ae6 100644
46643--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46644+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
46645@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
46646 fe_status_t *status)
46647 {
46648 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46649- struct dvbt_get_status_msg result;
46650- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46651+ struct dvbt_get_status_msg *result;
46652+ u8 *cmd;
46653 int ret;
46654
46655- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
46656- sizeof(result), 0);
46657+ cmd = kmalloc(1, GFP_KERNEL);
46658+ if (cmd == NULL)
46659+ return -ENOMEM;
46660+ result = kmalloc(sizeof(*result), GFP_KERNEL);
46661+ if (result == NULL) {
46662+ kfree(cmd);
46663+ return -ENOMEM;
46664+ }
46665+
46666+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46667+
46668+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
46669+ sizeof(*result), 0);
46670 if (ret < 0)
46671- return ret;
46672+ goto out;
46673
46674 *status = 0;
46675
46676- if (0xffff - le16_to_cpu(result.gain) > 30)
46677+ if (0xffff - le16_to_cpu(result->gain) > 30)
46678 *status |= FE_HAS_SIGNAL;
46679- if (result.lock_bits & (1 << 6))
46680+ if (result->lock_bits & (1 << 6))
46681 *status |= FE_HAS_LOCK;
46682- if (result.lock_bits & (1 << 5))
46683+ if (result->lock_bits & (1 << 5))
46684 *status |= FE_HAS_SYNC;
46685- if (result.lock_bits & (1 << 4))
46686+ if (result->lock_bits & (1 << 4))
46687 *status |= FE_HAS_CARRIER;
46688- if (result.lock_bits & (1 << 1))
46689+ if (result->lock_bits & (1 << 1))
46690 *status |= FE_HAS_VITERBI;
46691
46692 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
46693 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
46694 *status &= ~FE_HAS_LOCK;
46695
46696- return 0;
46697+out:
46698+ kfree(cmd);
46699+ kfree(result);
46700+ return ret;
46701 }
46702
46703 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
46704 {
46705 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46706- struct dvbt_get_status_msg status;
46707- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46708+ struct dvbt_get_status_msg *status;
46709+ char *cmd;
46710 int ret;
46711
46712- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46713- sizeof(status), 0);
46714+ cmd = kmalloc(1, GFP_KERNEL);
46715+ if (cmd == NULL)
46716+ return -ENOMEM;
46717+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46718+ if (status == NULL) {
46719+ kfree(cmd);
46720+ return -ENOMEM;
46721+ }
46722+
46723+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46724+
46725+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46726+ sizeof(*status), 0);
46727 if (ret < 0)
46728- return ret;
46729+ goto out;
46730
46731- *ber = le32_to_cpu(status.viterbi_error_rate);
46732+ *ber = le32_to_cpu(status->viterbi_error_rate);
46733+out:
46734+ kfree(cmd);
46735+ kfree(status);
46736 return 0;
46737 }
46738
46739 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
46740 {
46741 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46742- struct dvbt_get_status_msg status;
46743- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46744+ struct dvbt_get_status_msg *status;
46745+ u8 *cmd;
46746 int ret;
46747
46748- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
46749- sizeof(status), 0);
46750+ cmd = kmalloc(1, GFP_KERNEL);
46751+ if (cmd == NULL)
46752+ return -ENOMEM;
46753+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46754+ if (status == NULL) {
46755+ kfree(cmd);
46756+ return -ENOMEM;
46757+ }
46758+
46759+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46760+
46761+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
46762+ sizeof(*status), 0);
46763 if (ret < 0) {
46764 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
46765 ret);
46766- return ret;
46767+ goto out;
46768 }
46769- *unc = le32_to_cpu(status.uncorrected_block_count);
46770- return 0;
46771+ *unc = le32_to_cpu(status->uncorrected_block_count);
46772+
46773+out:
46774+ kfree(cmd);
46775+ kfree(status);
46776+ return ret;
46777 }
46778
46779 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
46780 u16 *strength)
46781 {
46782 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46783- struct dvbt_get_status_msg status;
46784- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46785+ struct dvbt_get_status_msg *status;
46786+ char *cmd;
46787 int ret;
46788
46789- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46790- sizeof(status), 0);
46791+ cmd = kmalloc(1, GFP_KERNEL);
46792+ if (cmd == NULL)
46793+ return -ENOMEM;
46794+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46795+ if (status == NULL) {
46796+ kfree(cmd);
46797+ return -ENOMEM;
46798+ }
46799+
46800+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46801+
46802+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46803+ sizeof(*status), 0);
46804 if (ret < 0) {
46805 err("cinergyt2_fe_read_signal_strength() Failed!"
46806 " (Error=%d)\n", ret);
46807- return ret;
46808+ goto out;
46809 }
46810- *strength = (0xffff - le16_to_cpu(status.gain));
46811+ *strength = (0xffff - le16_to_cpu(status->gain));
46812+
46813+out:
46814+ kfree(cmd);
46815+ kfree(status);
46816 return 0;
46817 }
46818
46819 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
46820 {
46821 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46822- struct dvbt_get_status_msg status;
46823- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46824+ struct dvbt_get_status_msg *status;
46825+ char *cmd;
46826 int ret;
46827
46828- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46829- sizeof(status), 0);
46830+ cmd = kmalloc(1, GFP_KERNEL);
46831+ if (cmd == NULL)
46832+ return -ENOMEM;
46833+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46834+ if (status == NULL) {
46835+ kfree(cmd);
46836+ return -ENOMEM;
46837+ }
46838+
46839+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46840+
46841+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46842+ sizeof(*status), 0);
46843 if (ret < 0) {
46844 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
46845- return ret;
46846+ goto out;
46847 }
46848- *snr = (status.snr << 8) | status.snr;
46849- return 0;
46850+ *snr = (status->snr << 8) | status->snr;
46851+
46852+out:
46853+ kfree(cmd);
46854+ kfree(status);
46855+ return ret;
46856 }
46857
46858 static int cinergyt2_fe_init(struct dvb_frontend *fe)
46859@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
46860 {
46861 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
46862 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46863- struct dvbt_set_parameters_msg param;
46864- char result[2];
46865+ struct dvbt_set_parameters_msg *param;
46866+ char *result;
46867 int err;
46868
46869- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
46870- param.tps = cpu_to_le16(compute_tps(fep));
46871- param.freq = cpu_to_le32(fep->frequency / 1000);
46872- param.flags = 0;
46873+ result = kmalloc(2, GFP_KERNEL);
46874+ if (result == NULL)
46875+ return -ENOMEM;
46876+ param = kmalloc(sizeof(*param), GFP_KERNEL);
46877+ if (param == NULL) {
46878+ kfree(result);
46879+ return -ENOMEM;
46880+ }
46881+
46882+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
46883+ param->tps = cpu_to_le16(compute_tps(fep));
46884+ param->freq = cpu_to_le32(fep->frequency / 1000);
46885+ param->flags = 0;
46886
46887 switch (fep->bandwidth_hz) {
46888 default:
46889 case 8000000:
46890- param.bandwidth = 8;
46891+ param->bandwidth = 8;
46892 break;
46893 case 7000000:
46894- param.bandwidth = 7;
46895+ param->bandwidth = 7;
46896 break;
46897 case 6000000:
46898- param.bandwidth = 6;
46899+ param->bandwidth = 6;
46900 break;
46901 }
46902
46903 err = dvb_usb_generic_rw(state->d,
46904- (char *)&param, sizeof(param),
46905- result, sizeof(result), 0);
46906+ (char *)param, sizeof(*param),
46907+ result, 2, 0);
46908 if (err < 0)
46909 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
46910
46911- return (err < 0) ? err : 0;
46912+ kfree(result);
46913+ kfree(param);
46914+ return err;
46915 }
46916
46917 static void cinergyt2_fe_release(struct dvb_frontend *fe)
46918diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46919index 733a7ff..f8b52e3 100644
46920--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46921+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46922@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
46923
46924 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
46925 {
46926- struct hexline hx;
46927- u8 reset;
46928+ struct hexline *hx;
46929+ u8 *reset;
46930 int ret,pos=0;
46931
46932+ reset = kmalloc(1, GFP_KERNEL);
46933+ if (reset == NULL)
46934+ return -ENOMEM;
46935+
46936+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
46937+ if (hx == NULL) {
46938+ kfree(reset);
46939+ return -ENOMEM;
46940+ }
46941+
46942 /* stop the CPU */
46943- reset = 1;
46944- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
46945+ reset[0] = 1;
46946+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
46947 err("could not stop the USB controller CPU.");
46948
46949- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
46950- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
46951- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
46952+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
46953+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
46954+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
46955
46956- if (ret != hx.len) {
46957+ if (ret != hx->len) {
46958 err("error while transferring firmware "
46959 "(transferred size: %d, block size: %d)",
46960- ret,hx.len);
46961+ ret,hx->len);
46962 ret = -EINVAL;
46963 break;
46964 }
46965 }
46966 if (ret < 0) {
46967 err("firmware download failed at %d with %d",pos,ret);
46968+ kfree(reset);
46969+ kfree(hx);
46970 return ret;
46971 }
46972
46973 if (ret == 0) {
46974 /* restart the CPU */
46975- reset = 0;
46976- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
46977+ reset[0] = 0;
46978+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
46979 err("could not restart the USB controller CPU.");
46980 ret = -EINVAL;
46981 }
46982 } else
46983 ret = -EIO;
46984
46985+ kfree(reset);
46986+ kfree(hx);
46987+
46988 return ret;
46989 }
46990 EXPORT_SYMBOL(usb_cypress_load_firmware);
46991diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
46992index 1a3df10..57997a5 100644
46993--- a/drivers/media/usb/dvb-usb/dw2102.c
46994+++ b/drivers/media/usb/dvb-usb/dw2102.c
46995@@ -118,7 +118,7 @@ struct su3000_state {
46996
46997 struct s6x0_state {
46998 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
46999-};
47000+} __no_const;
47001
47002 /* debug */
47003 static int dvb_usb_dw2102_debug;
47004diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
47005index 5801ae7..83f71fa 100644
47006--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
47007+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
47008@@ -87,8 +87,11 @@ struct technisat_usb2_state {
47009 static int technisat_usb2_i2c_access(struct usb_device *udev,
47010 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
47011 {
47012- u8 b[64];
47013- int ret, actual_length;
47014+ u8 *b = kmalloc(64, GFP_KERNEL);
47015+ int ret, actual_length, error = 0;
47016+
47017+ if (b == NULL)
47018+ return -ENOMEM;
47019
47020 deb_i2c("i2c-access: %02x, tx: ", device_addr);
47021 debug_dump(tx, txlen, deb_i2c);
47022@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47023
47024 if (ret < 0) {
47025 err("i2c-error: out failed %02x = %d", device_addr, ret);
47026- return -ENODEV;
47027+ error = -ENODEV;
47028+ goto out;
47029 }
47030
47031 ret = usb_bulk_msg(udev,
47032@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47033 b, 64, &actual_length, 1000);
47034 if (ret < 0) {
47035 err("i2c-error: in failed %02x = %d", device_addr, ret);
47036- return -ENODEV;
47037+ error = -ENODEV;
47038+ goto out;
47039 }
47040
47041 if (b[0] != I2C_STATUS_OK) {
47042@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47043 /* handle tuner-i2c-nak */
47044 if (!(b[0] == I2C_STATUS_NAK &&
47045 device_addr == 0x60
47046- /* && device_is_technisat_usb2 */))
47047- return -ENODEV;
47048+ /* && device_is_technisat_usb2 */)) {
47049+ error = -ENODEV;
47050+ goto out;
47051+ }
47052 }
47053
47054 deb_i2c("status: %d, ", b[0]);
47055@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
47056
47057 deb_i2c("\n");
47058
47059- return 0;
47060+out:
47061+ kfree(b);
47062+ return error;
47063 }
47064
47065 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
47066@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
47067 {
47068 int ret;
47069
47070- u8 led[8] = {
47071- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
47072- 0
47073- };
47074+ u8 *led = kzalloc(8, GFP_KERNEL);
47075+
47076+ if (led == NULL)
47077+ return -ENOMEM;
47078
47079 if (disable_led_control && state != TECH_LED_OFF)
47080 return 0;
47081
47082+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
47083+
47084 switch (state) {
47085 case TECH_LED_ON:
47086 led[1] = 0x82;
47087@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
47088 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
47089 USB_TYPE_VENDOR | USB_DIR_OUT,
47090 0, 0,
47091- led, sizeof(led), 500);
47092+ led, 8, 500);
47093
47094 mutex_unlock(&d->i2c_mutex);
47095+
47096+ kfree(led);
47097+
47098 return ret;
47099 }
47100
47101 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
47102 {
47103 int ret;
47104- u8 b = 0;
47105+ u8 *b = kzalloc(1, GFP_KERNEL);
47106+
47107+ if (b == NULL)
47108+ return -ENOMEM;
47109
47110 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
47111 return -EAGAIN;
47112@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
47113 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
47114 USB_TYPE_VENDOR | USB_DIR_OUT,
47115 (red << 8) | green, 0,
47116- &b, 1, 500);
47117+ b, 1, 500);
47118
47119 mutex_unlock(&d->i2c_mutex);
47120
47121+ kfree(b);
47122+
47123 return ret;
47124 }
47125
47126@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47127 struct dvb_usb_device_description **desc, int *cold)
47128 {
47129 int ret;
47130- u8 version[3];
47131+ u8 *version = kmalloc(3, GFP_KERNEL);
47132
47133 /* first select the interface */
47134 if (usb_set_interface(udev, 0, 1) != 0)
47135@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47136
47137 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
47138
47139+ if (version == NULL)
47140+ return 0;
47141+
47142 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
47143 GET_VERSION_INFO_VENDOR_REQUEST,
47144 USB_TYPE_VENDOR | USB_DIR_IN,
47145 0, 0,
47146- version, sizeof(version), 500);
47147+ version, 3, 500);
47148
47149 if (ret < 0)
47150 *cold = 1;
47151@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
47152 *cold = 0;
47153 }
47154
47155+ kfree(version);
47156+
47157 return 0;
47158 }
47159
47160@@ -594,10 +618,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
47161
47162 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
47163 {
47164- u8 buf[62], *b;
47165+ u8 *buf, *b;
47166 int ret;
47167 struct ir_raw_event ev;
47168
47169+ buf = kmalloc(62, GFP_KERNEL);
47170+
47171+ if (buf == NULL)
47172+ return -ENOMEM;
47173+
47174 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
47175 buf[1] = 0x08;
47176 buf[2] = 0x8f;
47177@@ -620,16 +649,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
47178 GET_IR_DATA_VENDOR_REQUEST,
47179 USB_TYPE_VENDOR | USB_DIR_IN,
47180 0x8080, 0,
47181- buf, sizeof(buf), 500);
47182+ buf, 62, 500);
47183
47184 unlock:
47185 mutex_unlock(&d->i2c_mutex);
47186
47187- if (ret < 0)
47188+ if (ret < 0) {
47189+ kfree(buf);
47190 return ret;
47191+ }
47192
47193- if (ret == 1)
47194+ if (ret == 1) {
47195+ kfree(buf);
47196 return 0; /* no key pressed */
47197+ }
47198
47199 /* decoding */
47200 b = buf+1;
47201@@ -656,6 +689,8 @@ unlock:
47202
47203 ir_raw_event_handle(d->rc_dev);
47204
47205+ kfree(buf);
47206+
47207 return 1;
47208 }
47209
47210diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47211index af63543..0436f20 100644
47212--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47213+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
47214@@ -429,7 +429,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
47215 * by passing a very big num_planes value */
47216 uplane = compat_alloc_user_space(num_planes *
47217 sizeof(struct v4l2_plane));
47218- kp->m.planes = (__force struct v4l2_plane *)uplane;
47219+ kp->m.planes = (__force_kernel struct v4l2_plane *)uplane;
47220
47221 while (--num_planes >= 0) {
47222 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
47223@@ -500,7 +500,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
47224 if (num_planes == 0)
47225 return 0;
47226
47227- uplane = (__force struct v4l2_plane __user *)kp->m.planes;
47228+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
47229 if (get_user(p, &up->m.planes))
47230 return -EFAULT;
47231 uplane32 = compat_ptr(p);
47232@@ -564,7 +564,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
47233 get_user(kp->flags, &up->flags) ||
47234 copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
47235 return -EFAULT;
47236- kp->base = (__force void *)compat_ptr(tmp);
47237+ kp->base = (__force_kernel void *)compat_ptr(tmp);
47238 return 0;
47239 }
47240
47241@@ -669,7 +669,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47242 n * sizeof(struct v4l2_ext_control32)))
47243 return -EFAULT;
47244 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
47245- kp->controls = (__force struct v4l2_ext_control *)kcontrols;
47246+ kp->controls = (__force_kernel struct v4l2_ext_control *)kcontrols;
47247 while (--n >= 0) {
47248 u32 id;
47249
47250@@ -696,7 +696,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
47251 {
47252 struct v4l2_ext_control32 __user *ucontrols;
47253 struct v4l2_ext_control __user *kcontrols =
47254- (__force struct v4l2_ext_control __user *)kp->controls;
47255+ (struct v4l2_ext_control __force_user *)kp->controls;
47256 int n = kp->count;
47257 compat_caddr_t p;
47258
47259@@ -780,7 +780,7 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
47260 get_user(tmp, &up->edid) ||
47261 copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
47262 return -EFAULT;
47263- kp->edid = (__force u8 *)compat_ptr(tmp);
47264+ kp->edid = (__force_kernel u8 *)compat_ptr(tmp);
47265 return 0;
47266 }
47267
47268diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
47269index 015f92a..59e311e 100644
47270--- a/drivers/media/v4l2-core/v4l2-device.c
47271+++ b/drivers/media/v4l2-core/v4l2-device.c
47272@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
47273 EXPORT_SYMBOL_GPL(v4l2_device_put);
47274
47275 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
47276- atomic_t *instance)
47277+ atomic_unchecked_t *instance)
47278 {
47279- int num = atomic_inc_return(instance) - 1;
47280+ int num = atomic_inc_return_unchecked(instance) - 1;
47281 int len = strlen(basename);
47282
47283 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
47284diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
47285index b084072..36706d7 100644
47286--- a/drivers/media/v4l2-core/v4l2-ioctl.c
47287+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
47288@@ -2151,7 +2151,8 @@ struct v4l2_ioctl_info {
47289 struct file *file, void *fh, void *p);
47290 } u;
47291 void (*debug)(const void *arg, bool write_only);
47292-};
47293+} __do_const;
47294+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
47295
47296 /* This control needs a priority check */
47297 #define INFO_FL_PRIO (1 << 0)
47298@@ -2335,7 +2336,7 @@ static long __video_do_ioctl(struct file *file,
47299 struct video_device *vfd = video_devdata(file);
47300 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
47301 bool write_only = false;
47302- struct v4l2_ioctl_info default_info;
47303+ v4l2_ioctl_info_no_const default_info;
47304 const struct v4l2_ioctl_info *info;
47305 void *fh = file->private_data;
47306 struct v4l2_fh *vfh = NULL;
47307@@ -2426,7 +2427,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47308 ret = -EINVAL;
47309 break;
47310 }
47311- *user_ptr = (void __user *)buf->m.planes;
47312+ *user_ptr = (void __force_user *)buf->m.planes;
47313 *kernel_ptr = (void **)&buf->m.planes;
47314 *array_size = sizeof(struct v4l2_plane) * buf->length;
47315 ret = 1;
47316@@ -2443,7 +2444,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47317 ret = -EINVAL;
47318 break;
47319 }
47320- *user_ptr = (void __user *)edid->edid;
47321+ *user_ptr = (void __force_user *)edid->edid;
47322 *kernel_ptr = (void **)&edid->edid;
47323 *array_size = edid->blocks * 128;
47324 ret = 1;
47325@@ -2461,7 +2462,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
47326 ret = -EINVAL;
47327 break;
47328 }
47329- *user_ptr = (void __user *)ctrls->controls;
47330+ *user_ptr = (void __force_user *)ctrls->controls;
47331 *kernel_ptr = (void **)&ctrls->controls;
47332 *array_size = sizeof(struct v4l2_ext_control)
47333 * ctrls->count;
47334@@ -2562,7 +2563,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
47335 }
47336
47337 if (has_array_args) {
47338- *kernel_ptr = (void __force *)user_ptr;
47339+ *kernel_ptr = (void __force_kernel *)user_ptr;
47340 if (copy_to_user(user_ptr, mbuf, array_size))
47341 err = -EFAULT;
47342 goto out_array_args;
47343diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
47344index 24696f5..3637780 100644
47345--- a/drivers/memory/omap-gpmc.c
47346+++ b/drivers/memory/omap-gpmc.c
47347@@ -211,7 +211,6 @@ struct omap3_gpmc_regs {
47348 };
47349
47350 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
47351-static struct irq_chip gpmc_irq_chip;
47352 static int gpmc_irq_start;
47353
47354 static struct resource gpmc_mem_root;
47355@@ -939,6 +938,17 @@ static void gpmc_irq_noop(struct irq_data *data) { }
47356
47357 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
47358
47359+static struct irq_chip gpmc_irq_chip = {
47360+ .name = "gpmc",
47361+ .irq_startup = gpmc_irq_noop_ret,
47362+ .irq_enable = gpmc_irq_enable,
47363+ .irq_disable = gpmc_irq_disable,
47364+ .irq_shutdown = gpmc_irq_noop,
47365+ .irq_ack = gpmc_irq_noop,
47366+ .irq_mask = gpmc_irq_noop,
47367+ .irq_unmask = gpmc_irq_noop,
47368+};
47369+
47370 static int gpmc_setup_irq(void)
47371 {
47372 int i;
47373@@ -953,15 +963,6 @@ static int gpmc_setup_irq(void)
47374 return gpmc_irq_start;
47375 }
47376
47377- gpmc_irq_chip.name = "gpmc";
47378- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
47379- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
47380- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
47381- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
47382- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
47383- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
47384- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
47385-
47386 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
47387 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
47388
47389diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
47390index 187f836..679544b 100644
47391--- a/drivers/message/fusion/mptbase.c
47392+++ b/drivers/message/fusion/mptbase.c
47393@@ -6746,8 +6746,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47394 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
47395 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
47396
47397+#ifdef CONFIG_GRKERNSEC_HIDESYM
47398+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
47399+#else
47400 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
47401 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
47402+#endif
47403+
47404 /*
47405 * Rounding UP to nearest 4-kB boundary here...
47406 */
47407@@ -6760,7 +6765,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
47408 ioc->facts.GlobalCredits);
47409
47410 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
47411+#ifdef CONFIG_GRKERNSEC_HIDESYM
47412+ NULL, NULL);
47413+#else
47414 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
47415+#endif
47416 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
47417 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
47418 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
47419diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
47420index 5bdaae1..eced16f 100644
47421--- a/drivers/message/fusion/mptsas.c
47422+++ b/drivers/message/fusion/mptsas.c
47423@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
47424 return 0;
47425 }
47426
47427+static inline void
47428+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47429+{
47430+ if (phy_info->port_details) {
47431+ phy_info->port_details->rphy = rphy;
47432+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47433+ ioc->name, rphy));
47434+ }
47435+
47436+ if (rphy) {
47437+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47438+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47439+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47440+ ioc->name, rphy, rphy->dev.release));
47441+ }
47442+}
47443+
47444 /* no mutex */
47445 static void
47446 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
47447@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
47448 return NULL;
47449 }
47450
47451-static inline void
47452-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
47453-{
47454- if (phy_info->port_details) {
47455- phy_info->port_details->rphy = rphy;
47456- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
47457- ioc->name, rphy));
47458- }
47459-
47460- if (rphy) {
47461- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
47462- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
47463- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
47464- ioc->name, rphy, rphy->dev.release));
47465- }
47466-}
47467-
47468 static inline struct sas_port *
47469 mptsas_get_port(struct mptsas_phyinfo *phy_info)
47470 {
47471diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
47472index 9a8e185..27ff17d 100644
47473--- a/drivers/mfd/ab8500-debugfs.c
47474+++ b/drivers/mfd/ab8500-debugfs.c
47475@@ -100,7 +100,7 @@ static int irq_last;
47476 static u32 *irq_count;
47477 static int num_irqs;
47478
47479-static struct device_attribute **dev_attr;
47480+static device_attribute_no_const **dev_attr;
47481 static char **event_name;
47482
47483 static u8 avg_sample = SAMPLE_16;
47484diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
47485index 5615522..1eb6f3dc 100644
47486--- a/drivers/mfd/kempld-core.c
47487+++ b/drivers/mfd/kempld-core.c
47488@@ -499,7 +499,7 @@ static struct platform_driver kempld_driver = {
47489 .remove = kempld_remove,
47490 };
47491
47492-static struct dmi_system_id kempld_dmi_table[] __initdata = {
47493+static const struct dmi_system_id kempld_dmi_table[] __initconst = {
47494 {
47495 .ident = "BHL6",
47496 .matches = {
47497diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
47498index c880c89..45a7c68 100644
47499--- a/drivers/mfd/max8925-i2c.c
47500+++ b/drivers/mfd/max8925-i2c.c
47501@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
47502 const struct i2c_device_id *id)
47503 {
47504 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
47505- static struct max8925_chip *chip;
47506+ struct max8925_chip *chip;
47507 struct device_node *node = client->dev.of_node;
47508
47509 if (node && !pdata) {
47510diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
47511index 7612d89..70549c2 100644
47512--- a/drivers/mfd/tps65910.c
47513+++ b/drivers/mfd/tps65910.c
47514@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
47515 struct tps65910_platform_data *pdata)
47516 {
47517 int ret = 0;
47518- static struct regmap_irq_chip *tps6591x_irqs_chip;
47519+ struct regmap_irq_chip *tps6591x_irqs_chip;
47520
47521 if (!irq) {
47522 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
47523diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
47524index 1b772ef..01e77d33 100644
47525--- a/drivers/mfd/twl4030-irq.c
47526+++ b/drivers/mfd/twl4030-irq.c
47527@@ -34,6 +34,7 @@
47528 #include <linux/of.h>
47529 #include <linux/irqdomain.h>
47530 #include <linux/i2c/twl.h>
47531+#include <asm/pgtable.h>
47532
47533 #include "twl-core.h"
47534
47535@@ -729,10 +730,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
47536 * Install an irq handler for each of the SIH modules;
47537 * clone dummy irq_chip since PIH can't *do* anything
47538 */
47539- twl4030_irq_chip = dummy_irq_chip;
47540- twl4030_irq_chip.name = "twl4030";
47541+ pax_open_kernel();
47542+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
47543+ *(const char **)&twl4030_irq_chip.name = "twl4030";
47544
47545- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
47546+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
47547+ pax_close_kernel();
47548
47549 for (i = irq_base; i < irq_end; i++) {
47550 irq_set_chip_and_handler(i, &twl4030_irq_chip,
47551diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
47552index 464419b..64bae8d 100644
47553--- a/drivers/misc/c2port/core.c
47554+++ b/drivers/misc/c2port/core.c
47555@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
47556 goto error_idr_alloc;
47557 c2dev->id = ret;
47558
47559- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
47560+ pax_open_kernel();
47561+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
47562+ pax_close_kernel();
47563
47564 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
47565 "c2port%d", c2dev->id);
47566diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
47567index 8385177..2f54635 100644
47568--- a/drivers/misc/eeprom/sunxi_sid.c
47569+++ b/drivers/misc/eeprom/sunxi_sid.c
47570@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
47571
47572 platform_set_drvdata(pdev, sid_data);
47573
47574- sid_bin_attr.size = sid_data->keysize;
47575+ pax_open_kernel();
47576+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
47577+ pax_close_kernel();
47578 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
47579 return -ENODEV;
47580
47581diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
47582index 36f5d52..32311c3 100644
47583--- a/drivers/misc/kgdbts.c
47584+++ b/drivers/misc/kgdbts.c
47585@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
47586 char before[BREAK_INSTR_SIZE];
47587 char after[BREAK_INSTR_SIZE];
47588
47589- probe_kernel_read(before, (char *)kgdbts_break_test,
47590+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
47591 BREAK_INSTR_SIZE);
47592 init_simple_test();
47593 ts.tst = plant_and_detach_test;
47594@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
47595 /* Activate test with initial breakpoint */
47596 if (!is_early)
47597 kgdb_breakpoint();
47598- probe_kernel_read(after, (char *)kgdbts_break_test,
47599+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
47600 BREAK_INSTR_SIZE);
47601 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
47602 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
47603diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
47604index 3ef4627..8d00486 100644
47605--- a/drivers/misc/lis3lv02d/lis3lv02d.c
47606+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
47607@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
47608 * the lid is closed. This leads to interrupts as soon as a little move
47609 * is done.
47610 */
47611- atomic_inc(&lis3->count);
47612+ atomic_inc_unchecked(&lis3->count);
47613
47614 wake_up_interruptible(&lis3->misc_wait);
47615 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
47616@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
47617 if (lis3->pm_dev)
47618 pm_runtime_get_sync(lis3->pm_dev);
47619
47620- atomic_set(&lis3->count, 0);
47621+ atomic_set_unchecked(&lis3->count, 0);
47622 return 0;
47623 }
47624
47625@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
47626 add_wait_queue(&lis3->misc_wait, &wait);
47627 while (true) {
47628 set_current_state(TASK_INTERRUPTIBLE);
47629- data = atomic_xchg(&lis3->count, 0);
47630+ data = atomic_xchg_unchecked(&lis3->count, 0);
47631 if (data)
47632 break;
47633
47634@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
47635 struct lis3lv02d, miscdev);
47636
47637 poll_wait(file, &lis3->misc_wait, wait);
47638- if (atomic_read(&lis3->count))
47639+ if (atomic_read_unchecked(&lis3->count))
47640 return POLLIN | POLLRDNORM;
47641 return 0;
47642 }
47643diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
47644index c439c82..1f20f57 100644
47645--- a/drivers/misc/lis3lv02d/lis3lv02d.h
47646+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
47647@@ -297,7 +297,7 @@ struct lis3lv02d {
47648 struct input_polled_dev *idev; /* input device */
47649 struct platform_device *pdev; /* platform device */
47650 struct regulator_bulk_data regulators[2];
47651- atomic_t count; /* interrupt count after last read */
47652+ atomic_unchecked_t count; /* interrupt count after last read */
47653 union axis_conversion ac; /* hw -> logical axis */
47654 int mapped_btns[3];
47655
47656diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
47657index 2f30bad..c4c13d0 100644
47658--- a/drivers/misc/sgi-gru/gruhandles.c
47659+++ b/drivers/misc/sgi-gru/gruhandles.c
47660@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
47661 unsigned long nsec;
47662
47663 nsec = CLKS2NSEC(clks);
47664- atomic_long_inc(&mcs_op_statistics[op].count);
47665- atomic_long_add(nsec, &mcs_op_statistics[op].total);
47666+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
47667+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
47668 if (mcs_op_statistics[op].max < nsec)
47669 mcs_op_statistics[op].max = nsec;
47670 }
47671diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
47672index 4f76359..cdfcb2e 100644
47673--- a/drivers/misc/sgi-gru/gruprocfs.c
47674+++ b/drivers/misc/sgi-gru/gruprocfs.c
47675@@ -32,9 +32,9 @@
47676
47677 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
47678
47679-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
47680+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
47681 {
47682- unsigned long val = atomic_long_read(v);
47683+ unsigned long val = atomic_long_read_unchecked(v);
47684
47685 seq_printf(s, "%16lu %s\n", val, id);
47686 }
47687@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
47688
47689 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
47690 for (op = 0; op < mcsop_last; op++) {
47691- count = atomic_long_read(&mcs_op_statistics[op].count);
47692- total = atomic_long_read(&mcs_op_statistics[op].total);
47693+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
47694+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
47695 max = mcs_op_statistics[op].max;
47696 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
47697 count ? total / count : 0, max);
47698diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
47699index 5c3ce24..4915ccb 100644
47700--- a/drivers/misc/sgi-gru/grutables.h
47701+++ b/drivers/misc/sgi-gru/grutables.h
47702@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
47703 * GRU statistics.
47704 */
47705 struct gru_stats_s {
47706- atomic_long_t vdata_alloc;
47707- atomic_long_t vdata_free;
47708- atomic_long_t gts_alloc;
47709- atomic_long_t gts_free;
47710- atomic_long_t gms_alloc;
47711- atomic_long_t gms_free;
47712- atomic_long_t gts_double_allocate;
47713- atomic_long_t assign_context;
47714- atomic_long_t assign_context_failed;
47715- atomic_long_t free_context;
47716- atomic_long_t load_user_context;
47717- atomic_long_t load_kernel_context;
47718- atomic_long_t lock_kernel_context;
47719- atomic_long_t unlock_kernel_context;
47720- atomic_long_t steal_user_context;
47721- atomic_long_t steal_kernel_context;
47722- atomic_long_t steal_context_failed;
47723- atomic_long_t nopfn;
47724- atomic_long_t asid_new;
47725- atomic_long_t asid_next;
47726- atomic_long_t asid_wrap;
47727- atomic_long_t asid_reuse;
47728- atomic_long_t intr;
47729- atomic_long_t intr_cbr;
47730- atomic_long_t intr_tfh;
47731- atomic_long_t intr_spurious;
47732- atomic_long_t intr_mm_lock_failed;
47733- atomic_long_t call_os;
47734- atomic_long_t call_os_wait_queue;
47735- atomic_long_t user_flush_tlb;
47736- atomic_long_t user_unload_context;
47737- atomic_long_t user_exception;
47738- atomic_long_t set_context_option;
47739- atomic_long_t check_context_retarget_intr;
47740- atomic_long_t check_context_unload;
47741- atomic_long_t tlb_dropin;
47742- atomic_long_t tlb_preload_page;
47743- atomic_long_t tlb_dropin_fail_no_asid;
47744- atomic_long_t tlb_dropin_fail_upm;
47745- atomic_long_t tlb_dropin_fail_invalid;
47746- atomic_long_t tlb_dropin_fail_range_active;
47747- atomic_long_t tlb_dropin_fail_idle;
47748- atomic_long_t tlb_dropin_fail_fmm;
47749- atomic_long_t tlb_dropin_fail_no_exception;
47750- atomic_long_t tfh_stale_on_fault;
47751- atomic_long_t mmu_invalidate_range;
47752- atomic_long_t mmu_invalidate_page;
47753- atomic_long_t flush_tlb;
47754- atomic_long_t flush_tlb_gru;
47755- atomic_long_t flush_tlb_gru_tgh;
47756- atomic_long_t flush_tlb_gru_zero_asid;
47757+ atomic_long_unchecked_t vdata_alloc;
47758+ atomic_long_unchecked_t vdata_free;
47759+ atomic_long_unchecked_t gts_alloc;
47760+ atomic_long_unchecked_t gts_free;
47761+ atomic_long_unchecked_t gms_alloc;
47762+ atomic_long_unchecked_t gms_free;
47763+ atomic_long_unchecked_t gts_double_allocate;
47764+ atomic_long_unchecked_t assign_context;
47765+ atomic_long_unchecked_t assign_context_failed;
47766+ atomic_long_unchecked_t free_context;
47767+ atomic_long_unchecked_t load_user_context;
47768+ atomic_long_unchecked_t load_kernel_context;
47769+ atomic_long_unchecked_t lock_kernel_context;
47770+ atomic_long_unchecked_t unlock_kernel_context;
47771+ atomic_long_unchecked_t steal_user_context;
47772+ atomic_long_unchecked_t steal_kernel_context;
47773+ atomic_long_unchecked_t steal_context_failed;
47774+ atomic_long_unchecked_t nopfn;
47775+ atomic_long_unchecked_t asid_new;
47776+ atomic_long_unchecked_t asid_next;
47777+ atomic_long_unchecked_t asid_wrap;
47778+ atomic_long_unchecked_t asid_reuse;
47779+ atomic_long_unchecked_t intr;
47780+ atomic_long_unchecked_t intr_cbr;
47781+ atomic_long_unchecked_t intr_tfh;
47782+ atomic_long_unchecked_t intr_spurious;
47783+ atomic_long_unchecked_t intr_mm_lock_failed;
47784+ atomic_long_unchecked_t call_os;
47785+ atomic_long_unchecked_t call_os_wait_queue;
47786+ atomic_long_unchecked_t user_flush_tlb;
47787+ atomic_long_unchecked_t user_unload_context;
47788+ atomic_long_unchecked_t user_exception;
47789+ atomic_long_unchecked_t set_context_option;
47790+ atomic_long_unchecked_t check_context_retarget_intr;
47791+ atomic_long_unchecked_t check_context_unload;
47792+ atomic_long_unchecked_t tlb_dropin;
47793+ atomic_long_unchecked_t tlb_preload_page;
47794+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
47795+ atomic_long_unchecked_t tlb_dropin_fail_upm;
47796+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
47797+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
47798+ atomic_long_unchecked_t tlb_dropin_fail_idle;
47799+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
47800+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
47801+ atomic_long_unchecked_t tfh_stale_on_fault;
47802+ atomic_long_unchecked_t mmu_invalidate_range;
47803+ atomic_long_unchecked_t mmu_invalidate_page;
47804+ atomic_long_unchecked_t flush_tlb;
47805+ atomic_long_unchecked_t flush_tlb_gru;
47806+ atomic_long_unchecked_t flush_tlb_gru_tgh;
47807+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
47808
47809- atomic_long_t copy_gpa;
47810- atomic_long_t read_gpa;
47811+ atomic_long_unchecked_t copy_gpa;
47812+ atomic_long_unchecked_t read_gpa;
47813
47814- atomic_long_t mesq_receive;
47815- atomic_long_t mesq_receive_none;
47816- atomic_long_t mesq_send;
47817- atomic_long_t mesq_send_failed;
47818- atomic_long_t mesq_noop;
47819- atomic_long_t mesq_send_unexpected_error;
47820- atomic_long_t mesq_send_lb_overflow;
47821- atomic_long_t mesq_send_qlimit_reached;
47822- atomic_long_t mesq_send_amo_nacked;
47823- atomic_long_t mesq_send_put_nacked;
47824- atomic_long_t mesq_page_overflow;
47825- atomic_long_t mesq_qf_locked;
47826- atomic_long_t mesq_qf_noop_not_full;
47827- atomic_long_t mesq_qf_switch_head_failed;
47828- atomic_long_t mesq_qf_unexpected_error;
47829- atomic_long_t mesq_noop_unexpected_error;
47830- atomic_long_t mesq_noop_lb_overflow;
47831- atomic_long_t mesq_noop_qlimit_reached;
47832- atomic_long_t mesq_noop_amo_nacked;
47833- atomic_long_t mesq_noop_put_nacked;
47834- atomic_long_t mesq_noop_page_overflow;
47835+ atomic_long_unchecked_t mesq_receive;
47836+ atomic_long_unchecked_t mesq_receive_none;
47837+ atomic_long_unchecked_t mesq_send;
47838+ atomic_long_unchecked_t mesq_send_failed;
47839+ atomic_long_unchecked_t mesq_noop;
47840+ atomic_long_unchecked_t mesq_send_unexpected_error;
47841+ atomic_long_unchecked_t mesq_send_lb_overflow;
47842+ atomic_long_unchecked_t mesq_send_qlimit_reached;
47843+ atomic_long_unchecked_t mesq_send_amo_nacked;
47844+ atomic_long_unchecked_t mesq_send_put_nacked;
47845+ atomic_long_unchecked_t mesq_page_overflow;
47846+ atomic_long_unchecked_t mesq_qf_locked;
47847+ atomic_long_unchecked_t mesq_qf_noop_not_full;
47848+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
47849+ atomic_long_unchecked_t mesq_qf_unexpected_error;
47850+ atomic_long_unchecked_t mesq_noop_unexpected_error;
47851+ atomic_long_unchecked_t mesq_noop_lb_overflow;
47852+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
47853+ atomic_long_unchecked_t mesq_noop_amo_nacked;
47854+ atomic_long_unchecked_t mesq_noop_put_nacked;
47855+ atomic_long_unchecked_t mesq_noop_page_overflow;
47856
47857 };
47858
47859@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
47860 tghop_invalidate, mcsop_last};
47861
47862 struct mcs_op_statistic {
47863- atomic_long_t count;
47864- atomic_long_t total;
47865+ atomic_long_unchecked_t count;
47866+ atomic_long_unchecked_t total;
47867 unsigned long max;
47868 };
47869
47870@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
47871
47872 #define STAT(id) do { \
47873 if (gru_options & OPT_STATS) \
47874- atomic_long_inc(&gru_stats.id); \
47875+ atomic_long_inc_unchecked(&gru_stats.id); \
47876 } while (0)
47877
47878 #ifdef CONFIG_SGI_GRU_DEBUG
47879diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
47880index c862cd4..0d176fe 100644
47881--- a/drivers/misc/sgi-xp/xp.h
47882+++ b/drivers/misc/sgi-xp/xp.h
47883@@ -288,7 +288,7 @@ struct xpc_interface {
47884 xpc_notify_func, void *);
47885 void (*received) (short, int, void *);
47886 enum xp_retval (*partid_to_nasids) (short, void *);
47887-};
47888+} __no_const;
47889
47890 extern struct xpc_interface xpc_interface;
47891
47892diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
47893index 01be66d..e3a0c7e 100644
47894--- a/drivers/misc/sgi-xp/xp_main.c
47895+++ b/drivers/misc/sgi-xp/xp_main.c
47896@@ -78,13 +78,13 @@ xpc_notloaded(void)
47897 }
47898
47899 struct xpc_interface xpc_interface = {
47900- (void (*)(int))xpc_notloaded,
47901- (void (*)(int))xpc_notloaded,
47902- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
47903- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
47904+ .connect = (void (*)(int))xpc_notloaded,
47905+ .disconnect = (void (*)(int))xpc_notloaded,
47906+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
47907+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
47908 void *))xpc_notloaded,
47909- (void (*)(short, int, void *))xpc_notloaded,
47910- (enum xp_retval(*)(short, void *))xpc_notloaded
47911+ .received = (void (*)(short, int, void *))xpc_notloaded,
47912+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
47913 };
47914 EXPORT_SYMBOL_GPL(xpc_interface);
47915
47916diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
47917index b94d5f7..7f494c5 100644
47918--- a/drivers/misc/sgi-xp/xpc.h
47919+++ b/drivers/misc/sgi-xp/xpc.h
47920@@ -835,6 +835,7 @@ struct xpc_arch_operations {
47921 void (*received_payload) (struct xpc_channel *, void *);
47922 void (*notify_senders_of_disconnect) (struct xpc_channel *);
47923 };
47924+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
47925
47926 /* struct xpc_partition act_state values (for XPC HB) */
47927
47928@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
47929 /* found in xpc_main.c */
47930 extern struct device *xpc_part;
47931 extern struct device *xpc_chan;
47932-extern struct xpc_arch_operations xpc_arch_ops;
47933+extern xpc_arch_operations_no_const xpc_arch_ops;
47934 extern int xpc_disengage_timelimit;
47935 extern int xpc_disengage_timedout;
47936 extern int xpc_activate_IRQ_rcvd;
47937diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
47938index 82dc574..8539ab2 100644
47939--- a/drivers/misc/sgi-xp/xpc_main.c
47940+++ b/drivers/misc/sgi-xp/xpc_main.c
47941@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
47942 .notifier_call = xpc_system_die,
47943 };
47944
47945-struct xpc_arch_operations xpc_arch_ops;
47946+xpc_arch_operations_no_const xpc_arch_ops;
47947
47948 /*
47949 * Timer function to enforce the timelimit on the partition disengage.
47950@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
47951
47952 if (((die_args->trapnr == X86_TRAP_MF) ||
47953 (die_args->trapnr == X86_TRAP_XF)) &&
47954- !user_mode_vm(die_args->regs))
47955+ !user_mode(die_args->regs))
47956 xpc_die_deactivate();
47957
47958 break;
47959diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
47960index ed2e71a..54c498e 100644
47961--- a/drivers/mmc/card/block.c
47962+++ b/drivers/mmc/card/block.c
47963@@ -577,7 +577,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
47964 if (idata->ic.postsleep_min_us)
47965 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
47966
47967- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
47968+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
47969 err = -EFAULT;
47970 goto cmd_rel_host;
47971 }
47972diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
47973index 18c4afe..43be71e 100644
47974--- a/drivers/mmc/host/dw_mmc.h
47975+++ b/drivers/mmc/host/dw_mmc.h
47976@@ -271,5 +271,5 @@ struct dw_mci_drv_data {
47977 void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
47978 int (*parse_dt)(struct dw_mci *host);
47979 int (*execute_tuning)(struct dw_mci_slot *slot);
47980-};
47981+} __do_const;
47982 #endif /* _DW_MMC_H_ */
47983diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
47984index 7fe1619..ae0781b 100644
47985--- a/drivers/mmc/host/mmci.c
47986+++ b/drivers/mmc/host/mmci.c
47987@@ -1630,7 +1630,9 @@ static int mmci_probe(struct amba_device *dev,
47988 mmc->caps |= MMC_CAP_CMD23;
47989
47990 if (variant->busy_detect) {
47991- mmci_ops.card_busy = mmci_card_busy;
47992+ pax_open_kernel();
47993+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
47994+ pax_close_kernel();
47995 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
47996 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
47997 mmc->max_busy_timeout = 0;
47998diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
47999index f84cfb0..aebe5d6 100644
48000--- a/drivers/mmc/host/omap_hsmmc.c
48001+++ b/drivers/mmc/host/omap_hsmmc.c
48002@@ -2054,7 +2054,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
48003
48004 if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
48005 dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
48006- omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
48007+ pax_open_kernel();
48008+ *(void **)&omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
48009+ pax_close_kernel();
48010 }
48011
48012 pm_runtime_enable(host->dev);
48013diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
48014index 10ef824..88461a2 100644
48015--- a/drivers/mmc/host/sdhci-esdhc-imx.c
48016+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
48017@@ -989,9 +989,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
48018 host->mmc->caps |= MMC_CAP_1_8V_DDR;
48019 }
48020
48021- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
48022- sdhci_esdhc_ops.platform_execute_tuning =
48023+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
48024+ pax_open_kernel();
48025+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
48026 esdhc_executing_tuning;
48027+ pax_close_kernel();
48028+ }
48029
48030 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
48031 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
48032diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
48033index c6d2dd7..81b1ca3 100644
48034--- a/drivers/mmc/host/sdhci-s3c.c
48035+++ b/drivers/mmc/host/sdhci-s3c.c
48036@@ -598,9 +598,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
48037 * we can use overriding functions instead of default.
48038 */
48039 if (sc->no_divider) {
48040- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
48041- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
48042- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
48043+ pax_open_kernel();
48044+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
48045+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
48046+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
48047+ pax_close_kernel();
48048 }
48049
48050 /* It supports additional host capabilities if needed */
48051diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
48052index 423666b..81ff5eb 100644
48053--- a/drivers/mtd/chips/cfi_cmdset_0020.c
48054+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
48055@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
48056 size_t totlen = 0, thislen;
48057 int ret = 0;
48058 size_t buflen = 0;
48059- static char *buffer;
48060+ char *buffer;
48061
48062 if (!ECCBUF_SIZE) {
48063 /* We should fall back to a general writev implementation.
48064diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
48065index f44c606..aa4e804 100644
48066--- a/drivers/mtd/nand/denali.c
48067+++ b/drivers/mtd/nand/denali.c
48068@@ -24,6 +24,7 @@
48069 #include <linux/slab.h>
48070 #include <linux/mtd/mtd.h>
48071 #include <linux/module.h>
48072+#include <linux/slab.h>
48073
48074 #include "denali.h"
48075
48076diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48077index 33f3c3c..d6bbe6a 100644
48078--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48079+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
48080@@ -386,7 +386,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
48081
48082 /* first try to map the upper buffer directly */
48083 if (virt_addr_valid(this->upper_buf) &&
48084- !object_is_on_stack(this->upper_buf)) {
48085+ !object_starts_on_stack(this->upper_buf)) {
48086 sg_init_one(sgl, this->upper_buf, this->upper_len);
48087 ret = dma_map_sg(this->dev, sgl, 1, dr);
48088 if (ret == 0)
48089diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
48090index a5dfbfb..8042ab4 100644
48091--- a/drivers/mtd/nftlmount.c
48092+++ b/drivers/mtd/nftlmount.c
48093@@ -24,6 +24,7 @@
48094 #include <asm/errno.h>
48095 #include <linux/delay.h>
48096 #include <linux/slab.h>
48097+#include <linux/sched.h>
48098 #include <linux/mtd/mtd.h>
48099 #include <linux/mtd/nand.h>
48100 #include <linux/mtd/nftl.h>
48101diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
48102index c23184a..4115c41 100644
48103--- a/drivers/mtd/sm_ftl.c
48104+++ b/drivers/mtd/sm_ftl.c
48105@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
48106 #define SM_CIS_VENDOR_OFFSET 0x59
48107 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
48108 {
48109- struct attribute_group *attr_group;
48110+ attribute_group_no_const *attr_group;
48111 struct attribute **attributes;
48112 struct sm_sysfs_attribute *vendor_attribute;
48113 char *vendor;
48114diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
48115index 7b11243..b3278a3 100644
48116--- a/drivers/net/bonding/bond_netlink.c
48117+++ b/drivers/net/bonding/bond_netlink.c
48118@@ -585,7 +585,7 @@ nla_put_failure:
48119 return -EMSGSIZE;
48120 }
48121
48122-struct rtnl_link_ops bond_link_ops __read_mostly = {
48123+struct rtnl_link_ops bond_link_ops = {
48124 .kind = "bond",
48125 .priv_size = sizeof(struct bonding),
48126 .setup = bond_setup,
48127diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
48128index b3b922a..80bba38 100644
48129--- a/drivers/net/caif/caif_hsi.c
48130+++ b/drivers/net/caif/caif_hsi.c
48131@@ -1444,7 +1444,7 @@ err:
48132 return -ENODEV;
48133 }
48134
48135-static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
48136+static struct rtnl_link_ops caif_hsi_link_ops = {
48137 .kind = "cfhsi",
48138 .priv_size = sizeof(struct cfhsi),
48139 .setup = cfhsi_setup,
48140diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
48141index 58808f65..0bdc7b3 100644
48142--- a/drivers/net/can/Kconfig
48143+++ b/drivers/net/can/Kconfig
48144@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
48145
48146 config CAN_FLEXCAN
48147 tristate "Support for Freescale FLEXCAN based chips"
48148- depends on ARM || PPC
48149+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
48150 ---help---
48151 Say Y here if you want to support for Freescale FlexCAN.
48152
48153diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
48154index b0f6924..59e9640 100644
48155--- a/drivers/net/can/dev.c
48156+++ b/drivers/net/can/dev.c
48157@@ -959,7 +959,7 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
48158 return -EOPNOTSUPP;
48159 }
48160
48161-static struct rtnl_link_ops can_link_ops __read_mostly = {
48162+static struct rtnl_link_ops can_link_ops = {
48163 .kind = "can",
48164 .maxtype = IFLA_CAN_MAX,
48165 .policy = can_policy,
48166diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
48167index 674f367..ec3a31f 100644
48168--- a/drivers/net/can/vcan.c
48169+++ b/drivers/net/can/vcan.c
48170@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev)
48171 dev->destructor = free_netdev;
48172 }
48173
48174-static struct rtnl_link_ops vcan_link_ops __read_mostly = {
48175+static struct rtnl_link_ops vcan_link_ops = {
48176 .kind = "vcan",
48177 .setup = vcan_setup,
48178 };
48179diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
48180index 49adbf1..fff7ff8 100644
48181--- a/drivers/net/dummy.c
48182+++ b/drivers/net/dummy.c
48183@@ -164,7 +164,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
48184 return 0;
48185 }
48186
48187-static struct rtnl_link_ops dummy_link_ops __read_mostly = {
48188+static struct rtnl_link_ops dummy_link_ops = {
48189 .kind = DRV_NAME,
48190 .setup = dummy_setup,
48191 .validate = dummy_validate,
48192diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
48193index 0443654..4f0aa18 100644
48194--- a/drivers/net/ethernet/8390/ax88796.c
48195+++ b/drivers/net/ethernet/8390/ax88796.c
48196@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
48197 if (ax->plat->reg_offsets)
48198 ei_local->reg_offset = ax->plat->reg_offsets;
48199 else {
48200+ resource_size_t _mem_size = mem_size;
48201+ do_div(_mem_size, 0x18);
48202 ei_local->reg_offset = ax->reg_offsets;
48203 for (ret = 0; ret < 0x18; ret++)
48204- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
48205+ ax->reg_offsets[ret] = _mem_size * ret;
48206 }
48207
48208 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
48209diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
48210index 6725dc0..163549c 100644
48211--- a/drivers/net/ethernet/altera/altera_tse_main.c
48212+++ b/drivers/net/ethernet/altera/altera_tse_main.c
48213@@ -1216,7 +1216,7 @@ static int tse_shutdown(struct net_device *dev)
48214 return 0;
48215 }
48216
48217-static struct net_device_ops altera_tse_netdev_ops = {
48218+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
48219 .ndo_open = tse_open,
48220 .ndo_stop = tse_shutdown,
48221 .ndo_start_xmit = tse_start_xmit,
48222@@ -1453,11 +1453,13 @@ static int altera_tse_probe(struct platform_device *pdev)
48223 ndev->netdev_ops = &altera_tse_netdev_ops;
48224 altera_tse_set_ethtool_ops(ndev);
48225
48226+ pax_open_kernel();
48227 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
48228
48229 if (priv->hash_filter)
48230 altera_tse_netdev_ops.ndo_set_rx_mode =
48231 tse_set_rx_mode_hashfilter;
48232+ pax_close_kernel();
48233
48234 /* Scatter/gather IO is not supported,
48235 * so it is turned off
48236diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48237index 29a0927..5a348e24 100644
48238--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48239+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
48240@@ -1122,14 +1122,14 @@ do { \
48241 * operations, everything works on mask values.
48242 */
48243 #define XMDIO_READ(_pdata, _mmd, _reg) \
48244- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
48245+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
48246 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
48247
48248 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
48249 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
48250
48251 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
48252- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
48253+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
48254 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
48255
48256 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
48257diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48258index 8a50b01..39c1ad0 100644
48259--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48260+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
48261@@ -187,7 +187,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
48262
48263 memcpy(pdata->ets, ets, sizeof(*pdata->ets));
48264
48265- pdata->hw_if.config_dcb_tc(pdata);
48266+ pdata->hw_if->config_dcb_tc(pdata);
48267
48268 return 0;
48269 }
48270@@ -226,7 +226,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
48271
48272 memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
48273
48274- pdata->hw_if.config_dcb_pfc(pdata);
48275+ pdata->hw_if->config_dcb_pfc(pdata);
48276
48277 return 0;
48278 }
48279diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48280index 5c92fb7..e0757dc 100644
48281--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48282+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
48283@@ -347,7 +347,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
48284
48285 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48286 {
48287- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48288+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48289 struct xgbe_channel *channel;
48290 struct xgbe_ring *ring;
48291 struct xgbe_ring_data *rdata;
48292@@ -388,7 +388,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
48293
48294 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
48295 {
48296- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48297+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48298 struct xgbe_channel *channel;
48299 struct xgbe_ring *ring;
48300 struct xgbe_ring_desc *rdesc;
48301@@ -620,17 +620,12 @@ err_out:
48302 return 0;
48303 }
48304
48305-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
48306-{
48307- DBGPR("-->xgbe_init_function_ptrs_desc\n");
48308-
48309- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
48310- desc_if->free_ring_resources = xgbe_free_ring_resources;
48311- desc_if->map_tx_skb = xgbe_map_tx_skb;
48312- desc_if->map_rx_buffer = xgbe_map_rx_buffer;
48313- desc_if->unmap_rdata = xgbe_unmap_rdata;
48314- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
48315- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
48316-
48317- DBGPR("<--xgbe_init_function_ptrs_desc\n");
48318-}
48319+const struct xgbe_desc_if default_xgbe_desc_if = {
48320+ .alloc_ring_resources = xgbe_alloc_ring_resources,
48321+ .free_ring_resources = xgbe_free_ring_resources,
48322+ .map_tx_skb = xgbe_map_tx_skb,
48323+ .map_rx_buffer = xgbe_map_rx_buffer,
48324+ .unmap_rdata = xgbe_unmap_rdata,
48325+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
48326+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
48327+};
48328diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48329index 400757b..d8c53f6 100644
48330--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48331+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
48332@@ -2748,7 +2748,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
48333
48334 static int xgbe_init(struct xgbe_prv_data *pdata)
48335 {
48336- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48337+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48338 int ret;
48339
48340 DBGPR("-->xgbe_init\n");
48341@@ -2813,108 +2813,103 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
48342 return 0;
48343 }
48344
48345-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
48346-{
48347- DBGPR("-->xgbe_init_function_ptrs\n");
48348-
48349- hw_if->tx_complete = xgbe_tx_complete;
48350-
48351- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
48352- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
48353- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
48354- hw_if->set_mac_address = xgbe_set_mac_address;
48355-
48356- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
48357- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
48358-
48359- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
48360- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
48361- hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
48362- hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
48363- hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
48364-
48365- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
48366- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
48367-
48368- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
48369- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
48370- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
48371-
48372- hw_if->enable_tx = xgbe_enable_tx;
48373- hw_if->disable_tx = xgbe_disable_tx;
48374- hw_if->enable_rx = xgbe_enable_rx;
48375- hw_if->disable_rx = xgbe_disable_rx;
48376-
48377- hw_if->powerup_tx = xgbe_powerup_tx;
48378- hw_if->powerdown_tx = xgbe_powerdown_tx;
48379- hw_if->powerup_rx = xgbe_powerup_rx;
48380- hw_if->powerdown_rx = xgbe_powerdown_rx;
48381-
48382- hw_if->dev_xmit = xgbe_dev_xmit;
48383- hw_if->dev_read = xgbe_dev_read;
48384- hw_if->enable_int = xgbe_enable_int;
48385- hw_if->disable_int = xgbe_disable_int;
48386- hw_if->init = xgbe_init;
48387- hw_if->exit = xgbe_exit;
48388+const struct xgbe_hw_if default_xgbe_hw_if = {
48389+ .tx_complete = xgbe_tx_complete,
48390+
48391+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
48392+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
48393+ .add_mac_addresses = xgbe_add_mac_addresses,
48394+ .set_mac_address = xgbe_set_mac_address,
48395+
48396+ .enable_rx_csum = xgbe_enable_rx_csum,
48397+ .disable_rx_csum = xgbe_disable_rx_csum,
48398+
48399+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
48400+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
48401+ .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering,
48402+ .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering,
48403+ .update_vlan_hash_table = xgbe_update_vlan_hash_table,
48404+
48405+ .read_mmd_regs = xgbe_read_mmd_regs,
48406+ .write_mmd_regs = xgbe_write_mmd_regs,
48407+
48408+ .set_gmii_speed = xgbe_set_gmii_speed,
48409+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
48410+ .set_xgmii_speed = xgbe_set_xgmii_speed,
48411+
48412+ .enable_tx = xgbe_enable_tx,
48413+ .disable_tx = xgbe_disable_tx,
48414+ .enable_rx = xgbe_enable_rx,
48415+ .disable_rx = xgbe_disable_rx,
48416+
48417+ .powerup_tx = xgbe_powerup_tx,
48418+ .powerdown_tx = xgbe_powerdown_tx,
48419+ .powerup_rx = xgbe_powerup_rx,
48420+ .powerdown_rx = xgbe_powerdown_rx,
48421+
48422+ .dev_xmit = xgbe_dev_xmit,
48423+ .dev_read = xgbe_dev_read,
48424+ .enable_int = xgbe_enable_int,
48425+ .disable_int = xgbe_disable_int,
48426+ .init = xgbe_init,
48427+ .exit = xgbe_exit,
48428
48429 /* Descriptor related Sequences have to be initialized here */
48430- hw_if->tx_desc_init = xgbe_tx_desc_init;
48431- hw_if->rx_desc_init = xgbe_rx_desc_init;
48432- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
48433- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
48434- hw_if->is_last_desc = xgbe_is_last_desc;
48435- hw_if->is_context_desc = xgbe_is_context_desc;
48436- hw_if->tx_start_xmit = xgbe_tx_start_xmit;
48437+ .tx_desc_init = xgbe_tx_desc_init,
48438+ .rx_desc_init = xgbe_rx_desc_init,
48439+ .tx_desc_reset = xgbe_tx_desc_reset,
48440+ .rx_desc_reset = xgbe_rx_desc_reset,
48441+ .is_last_desc = xgbe_is_last_desc,
48442+ .is_context_desc = xgbe_is_context_desc,
48443+ .tx_start_xmit = xgbe_tx_start_xmit,
48444
48445 /* For FLOW ctrl */
48446- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
48447- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
48448+ .config_tx_flow_control = xgbe_config_tx_flow_control,
48449+ .config_rx_flow_control = xgbe_config_rx_flow_control,
48450
48451 /* For RX coalescing */
48452- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
48453- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
48454- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
48455- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
48456+ .config_rx_coalesce = xgbe_config_rx_coalesce,
48457+ .config_tx_coalesce = xgbe_config_tx_coalesce,
48458+ .usec_to_riwt = xgbe_usec_to_riwt,
48459+ .riwt_to_usec = xgbe_riwt_to_usec,
48460
48461 /* For RX and TX threshold config */
48462- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
48463- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
48464+ .config_rx_threshold = xgbe_config_rx_threshold,
48465+ .config_tx_threshold = xgbe_config_tx_threshold,
48466
48467 /* For RX and TX Store and Forward Mode config */
48468- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
48469- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
48470+ .config_rsf_mode = xgbe_config_rsf_mode,
48471+ .config_tsf_mode = xgbe_config_tsf_mode,
48472
48473 /* For TX DMA Operating on Second Frame config */
48474- hw_if->config_osp_mode = xgbe_config_osp_mode;
48475+ .config_osp_mode = xgbe_config_osp_mode,
48476
48477 /* For RX and TX PBL config */
48478- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
48479- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
48480- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
48481- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
48482- hw_if->config_pblx8 = xgbe_config_pblx8;
48483+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
48484+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
48485+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
48486+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
48487+ .config_pblx8 = xgbe_config_pblx8,
48488
48489 /* For MMC statistics support */
48490- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
48491- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
48492- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
48493+ .tx_mmc_int = xgbe_tx_mmc_int,
48494+ .rx_mmc_int = xgbe_rx_mmc_int,
48495+ .read_mmc_stats = xgbe_read_mmc_stats,
48496
48497 /* For PTP config */
48498- hw_if->config_tstamp = xgbe_config_tstamp;
48499- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
48500- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
48501- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
48502- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
48503+ .config_tstamp = xgbe_config_tstamp,
48504+ .update_tstamp_addend = xgbe_update_tstamp_addend,
48505+ .set_tstamp_time = xgbe_set_tstamp_time,
48506+ .get_tstamp_time = xgbe_get_tstamp_time,
48507+ .get_tx_tstamp = xgbe_get_tx_tstamp,
48508
48509 /* For Data Center Bridging config */
48510- hw_if->config_dcb_tc = xgbe_config_dcb_tc;
48511- hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
48512+ .config_dcb_tc = xgbe_config_dcb_tc,
48513+ .config_dcb_pfc = xgbe_config_dcb_pfc,
48514
48515 /* For Receive Side Scaling */
48516- hw_if->enable_rss = xgbe_enable_rss;
48517- hw_if->disable_rss = xgbe_disable_rss;
48518- hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
48519- hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
48520-
48521- DBGPR("<--xgbe_init_function_ptrs\n");
48522-}
48523+ .enable_rss = xgbe_enable_rss,
48524+ .disable_rss = xgbe_disable_rss,
48525+ .set_rss_hash_key = xgbe_set_rss_hash_key,
48526+ .set_rss_lookup_table = xgbe_set_rss_lookup_table,
48527+};
48528diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48529index 885b02b..4b31a4c 100644
48530--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48531+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
48532@@ -244,7 +244,7 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
48533 * support, tell it now
48534 */
48535 if (ring->tx.xmit_more)
48536- pdata->hw_if.tx_start_xmit(channel, ring);
48537+ pdata->hw_if->tx_start_xmit(channel, ring);
48538
48539 return NETDEV_TX_BUSY;
48540 }
48541@@ -272,7 +272,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
48542
48543 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48544 {
48545- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48546+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48547 struct xgbe_channel *channel;
48548 enum xgbe_int int_id;
48549 unsigned int i;
48550@@ -294,7 +294,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
48551
48552 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48553 {
48554- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48555+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48556 struct xgbe_channel *channel;
48557 enum xgbe_int int_id;
48558 unsigned int i;
48559@@ -317,7 +317,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
48560 static irqreturn_t xgbe_isr(int irq, void *data)
48561 {
48562 struct xgbe_prv_data *pdata = data;
48563- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48564+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48565 struct xgbe_channel *channel;
48566 unsigned int dma_isr, dma_ch_isr;
48567 unsigned int mac_isr, mac_tssr;
48568@@ -673,7 +673,7 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
48569
48570 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48571 {
48572- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48573+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48574
48575 DBGPR("-->xgbe_init_tx_coalesce\n");
48576
48577@@ -687,7 +687,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
48578
48579 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48580 {
48581- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48582+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48583
48584 DBGPR("-->xgbe_init_rx_coalesce\n");
48585
48586@@ -701,7 +701,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
48587
48588 static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
48589 {
48590- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48591+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48592 struct xgbe_channel *channel;
48593 struct xgbe_ring *ring;
48594 struct xgbe_ring_data *rdata;
48595@@ -726,7 +726,7 @@ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
48596
48597 static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
48598 {
48599- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48600+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48601 struct xgbe_channel *channel;
48602 struct xgbe_ring *ring;
48603 struct xgbe_ring_data *rdata;
48604@@ -752,7 +752,7 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
48605 static void xgbe_adjust_link(struct net_device *netdev)
48606 {
48607 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48608- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48609+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48610 struct phy_device *phydev = pdata->phydev;
48611 int new_state = 0;
48612
48613@@ -860,7 +860,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
48614 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
48615 {
48616 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48617- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48618+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48619 unsigned long flags;
48620
48621 DBGPR("-->xgbe_powerdown\n");
48622@@ -898,7 +898,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
48623 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
48624 {
48625 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48626- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48627+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48628 unsigned long flags;
48629
48630 DBGPR("-->xgbe_powerup\n");
48631@@ -935,7 +935,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
48632
48633 static int xgbe_start(struct xgbe_prv_data *pdata)
48634 {
48635- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48636+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48637 struct net_device *netdev = pdata->netdev;
48638 int ret;
48639
48640@@ -976,7 +976,7 @@ err_napi:
48641
48642 static void xgbe_stop(struct xgbe_prv_data *pdata)
48643 {
48644- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48645+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48646 struct xgbe_channel *channel;
48647 struct net_device *netdev = pdata->netdev;
48648 struct netdev_queue *txq;
48649@@ -1203,7 +1203,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
48650 return -ERANGE;
48651 }
48652
48653- pdata->hw_if.config_tstamp(pdata, mac_tscr);
48654+ pdata->hw_if->config_tstamp(pdata, mac_tscr);
48655
48656 memcpy(&pdata->tstamp_config, &config, sizeof(config));
48657
48658@@ -1352,7 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
48659 static int xgbe_open(struct net_device *netdev)
48660 {
48661 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48662- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48663+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48664 int ret;
48665
48666 DBGPR("-->xgbe_open\n");
48667@@ -1424,7 +1424,7 @@ err_phy_init:
48668 static int xgbe_close(struct net_device *netdev)
48669 {
48670 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48671- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48672+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48673
48674 DBGPR("-->xgbe_close\n");
48675
48676@@ -1452,8 +1452,8 @@ static int xgbe_close(struct net_device *netdev)
48677 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
48678 {
48679 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48680- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48681- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48682+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48683+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48684 struct xgbe_channel *channel;
48685 struct xgbe_ring *ring;
48686 struct xgbe_packet_data *packet;
48687@@ -1521,7 +1521,7 @@ tx_netdev_return:
48688 static void xgbe_set_rx_mode(struct net_device *netdev)
48689 {
48690 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48691- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48692+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48693 unsigned int pr_mode, am_mode;
48694
48695 DBGPR("-->xgbe_set_rx_mode\n");
48696@@ -1540,7 +1540,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
48697 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
48698 {
48699 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48700- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48701+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48702 struct sockaddr *saddr = addr;
48703
48704 DBGPR("-->xgbe_set_mac_address\n");
48705@@ -1607,7 +1607,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
48706
48707 DBGPR("-->%s\n", __func__);
48708
48709- pdata->hw_if.read_mmc_stats(pdata);
48710+ pdata->hw_if->read_mmc_stats(pdata);
48711
48712 s->rx_packets = pstats->rxframecount_gb;
48713 s->rx_bytes = pstats->rxoctetcount_gb;
48714@@ -1634,7 +1634,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
48715 u16 vid)
48716 {
48717 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48718- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48719+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48720
48721 DBGPR("-->%s\n", __func__);
48722
48723@@ -1650,7 +1650,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
48724 u16 vid)
48725 {
48726 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48727- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48728+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48729
48730 DBGPR("-->%s\n", __func__);
48731
48732@@ -1716,7 +1716,7 @@ static int xgbe_set_features(struct net_device *netdev,
48733 netdev_features_t features)
48734 {
48735 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48736- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48737+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48738 netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
48739 int ret = 0;
48740
48741@@ -1781,8 +1781,8 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
48742 static void xgbe_rx_refresh(struct xgbe_channel *channel)
48743 {
48744 struct xgbe_prv_data *pdata = channel->pdata;
48745- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48746- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48747+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48748+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48749 struct xgbe_ring *ring = channel->rx_ring;
48750 struct xgbe_ring_data *rdata;
48751
48752@@ -1835,8 +1835,8 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
48753 static int xgbe_tx_poll(struct xgbe_channel *channel)
48754 {
48755 struct xgbe_prv_data *pdata = channel->pdata;
48756- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48757- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48758+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48759+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48760 struct xgbe_ring *ring = channel->tx_ring;
48761 struct xgbe_ring_data *rdata;
48762 struct xgbe_ring_desc *rdesc;
48763@@ -1901,7 +1901,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
48764 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
48765 {
48766 struct xgbe_prv_data *pdata = channel->pdata;
48767- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48768+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48769 struct xgbe_ring *ring = channel->rx_ring;
48770 struct xgbe_ring_data *rdata;
48771 struct xgbe_packet_data *packet;
48772diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48773index ebf4893..a8f51c6 100644
48774--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48775+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48776@@ -203,7 +203,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
48777
48778 DBGPR("-->%s\n", __func__);
48779
48780- pdata->hw_if.read_mmc_stats(pdata);
48781+ pdata->hw_if->read_mmc_stats(pdata);
48782 for (i = 0; i < XGBE_STATS_COUNT; i++) {
48783 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
48784 *data++ = *(u64 *)stat;
48785@@ -378,7 +378,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
48786 struct ethtool_coalesce *ec)
48787 {
48788 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48789- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48790+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48791 unsigned int riwt;
48792
48793 DBGPR("-->xgbe_get_coalesce\n");
48794@@ -401,7 +401,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
48795 struct ethtool_coalesce *ec)
48796 {
48797 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48798- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48799+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48800 unsigned int rx_frames, rx_riwt, rx_usecs;
48801 unsigned int tx_frames, tx_usecs;
48802
48803@@ -536,7 +536,7 @@ static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
48804 const u8 *key, const u8 hfunc)
48805 {
48806 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48807- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48808+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48809 unsigned int ret;
48810
48811 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
48812diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48813index 32dd651..225cca3 100644
48814--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48815+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48816@@ -159,12 +159,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
48817 DBGPR("<--xgbe_default_config\n");
48818 }
48819
48820-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
48821-{
48822- xgbe_init_function_ptrs_dev(&pdata->hw_if);
48823- xgbe_init_function_ptrs_desc(&pdata->desc_if);
48824-}
48825-
48826 #ifdef CONFIG_ACPI
48827 static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
48828 {
48829@@ -396,9 +390,8 @@ static int xgbe_probe(struct platform_device *pdev)
48830 memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
48831
48832 /* Set all the function pointers */
48833- xgbe_init_all_fptrs(pdata);
48834- hw_if = &pdata->hw_if;
48835- desc_if = &pdata->desc_if;
48836+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
48837+ desc_if = pdata->desc_if = &default_xgbe_desc_if;
48838
48839 /* Issue software reset to device */
48840 hw_if->exit(pdata);
48841diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48842index 59e267f..0842a88 100644
48843--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48844+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48845@@ -126,7 +126,7 @@
48846 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
48847 {
48848 struct xgbe_prv_data *pdata = mii->priv;
48849- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48850+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48851 int mmd_data;
48852
48853 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
48854@@ -143,7 +143,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
48855 u16 mmd_val)
48856 {
48857 struct xgbe_prv_data *pdata = mii->priv;
48858- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48859+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48860 int mmd_data = mmd_val;
48861
48862 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
48863diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48864index f326178..8bd7daf 100644
48865--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48866+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48867@@ -129,7 +129,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
48868 tstamp_cc);
48869 u64 nsec;
48870
48871- nsec = pdata->hw_if.get_tstamp_time(pdata);
48872+ nsec = pdata->hw_if->get_tstamp_time(pdata);
48873
48874 return nsec;
48875 }
48876@@ -158,7 +158,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
48877
48878 spin_lock_irqsave(&pdata->tstamp_lock, flags);
48879
48880- pdata->hw_if.update_tstamp_addend(pdata, addend);
48881+ pdata->hw_if->update_tstamp_addend(pdata, addend);
48882
48883 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
48884
48885diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
48886index 13e8f95..1d8beef 100644
48887--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
48888+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
48889@@ -675,8 +675,8 @@ struct xgbe_prv_data {
48890 int dev_irq;
48891 unsigned int per_channel_irq;
48892
48893- struct xgbe_hw_if hw_if;
48894- struct xgbe_desc_if desc_if;
48895+ struct xgbe_hw_if *hw_if;
48896+ struct xgbe_desc_if *desc_if;
48897
48898 /* AXI DMA settings */
48899 unsigned int coherent;
48900@@ -798,6 +798,9 @@ struct xgbe_prv_data {
48901 #endif
48902 };
48903
48904+extern const struct xgbe_hw_if default_xgbe_hw_if;
48905+extern const struct xgbe_desc_if default_xgbe_desc_if;
48906+
48907 /* Function prototypes*/
48908
48909 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
48910diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48911index adcacda..fa6e0ae 100644
48912--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48913+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48914@@ -1065,7 +1065,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
48915 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
48916 {
48917 /* RX_MODE controlling object */
48918- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
48919+ bnx2x_init_rx_mode_obj(bp);
48920
48921 /* multicast configuration controlling object */
48922 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
48923diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48924index 07cdf9b..b08ecc7 100644
48925--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48926+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48927@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
48928 return rc;
48929 }
48930
48931-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
48932- struct bnx2x_rx_mode_obj *o)
48933+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
48934 {
48935 if (CHIP_IS_E1x(bp)) {
48936- o->wait_comp = bnx2x_empty_rx_mode_wait;
48937- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
48938+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
48939+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
48940 } else {
48941- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
48942- o->config_rx_mode = bnx2x_set_rx_mode_e2;
48943+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
48944+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
48945 }
48946 }
48947
48948diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48949index 86baecb..ff3bb46 100644
48950--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48951+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48952@@ -1411,8 +1411,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
48953
48954 /********************* RX MODE ****************/
48955
48956-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
48957- struct bnx2x_rx_mode_obj *o);
48958+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
48959
48960 /**
48961 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
48962diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
48963index 31c9f82..e65e986 100644
48964--- a/drivers/net/ethernet/broadcom/tg3.h
48965+++ b/drivers/net/ethernet/broadcom/tg3.h
48966@@ -150,6 +150,7 @@
48967 #define CHIPREV_ID_5750_A0 0x4000
48968 #define CHIPREV_ID_5750_A1 0x4001
48969 #define CHIPREV_ID_5750_A3 0x4003
48970+#define CHIPREV_ID_5750_C1 0x4201
48971 #define CHIPREV_ID_5750_C2 0x4202
48972 #define CHIPREV_ID_5752_A0_HW 0x5000
48973 #define CHIPREV_ID_5752_A0 0x6000
48974diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
48975index 903466e..b285864 100644
48976--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
48977+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
48978@@ -1693,10 +1693,10 @@ bna_cb_ioceth_reset(void *arg)
48979 }
48980
48981 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
48982- bna_cb_ioceth_enable,
48983- bna_cb_ioceth_disable,
48984- bna_cb_ioceth_hbfail,
48985- bna_cb_ioceth_reset
48986+ .enable_cbfn = bna_cb_ioceth_enable,
48987+ .disable_cbfn = bna_cb_ioceth_disable,
48988+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
48989+ .reset_cbfn = bna_cb_ioceth_reset
48990 };
48991
48992 static void bna_attr_init(struct bna_ioceth *ioceth)
48993diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48994index 8cffcdf..aadf043 100644
48995--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48996+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48997@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
48998 */
48999 struct l2t_skb_cb {
49000 arp_failure_handler_func arp_failure_handler;
49001-};
49002+} __no_const;
49003
49004 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
49005
49006diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
49007index d929951..a2c23f5 100644
49008--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
49009+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
49010@@ -2215,7 +2215,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
49011
49012 int i;
49013 struct adapter *ap = netdev2adap(dev);
49014- static const unsigned int *reg_ranges;
49015+ const unsigned int *reg_ranges;
49016 int arr_size = 0, buf_size = 0;
49017
49018 if (is_t4(ap->params.chip)) {
49019diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
49020index badff18..e15c4ec 100644
49021--- a/drivers/net/ethernet/dec/tulip/de4x5.c
49022+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
49023@@ -5373,7 +5373,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
49024 for (i=0; i<ETH_ALEN; i++) {
49025 tmp.addr[i] = dev->dev_addr[i];
49026 }
49027- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
49028+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
49029 break;
49030
49031 case DE4X5_SET_HWADDR: /* Set the hardware address */
49032@@ -5413,7 +5413,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
49033 spin_lock_irqsave(&lp->lock, flags);
49034 memcpy(&statbuf, &lp->pktStats, ioc->len);
49035 spin_unlock_irqrestore(&lp->lock, flags);
49036- if (copy_to_user(ioc->data, &statbuf, ioc->len))
49037+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
49038 return -EFAULT;
49039 break;
49040 }
49041diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
49042index 893753f..3b5d790 100644
49043--- a/drivers/net/ethernet/emulex/benet/be_main.c
49044+++ b/drivers/net/ethernet/emulex/benet/be_main.c
49045@@ -536,7 +536,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
49046
49047 if (wrapped)
49048 newacc += 65536;
49049- ACCESS_ONCE(*acc) = newacc;
49050+ ACCESS_ONCE_RW(*acc) = newacc;
49051 }
49052
49053 static void populate_erx_stats(struct be_adapter *adapter,
49054diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
49055index 6d0c5d5..55be363 100644
49056--- a/drivers/net/ethernet/faraday/ftgmac100.c
49057+++ b/drivers/net/ethernet/faraday/ftgmac100.c
49058@@ -30,6 +30,8 @@
49059 #include <linux/netdevice.h>
49060 #include <linux/phy.h>
49061 #include <linux/platform_device.h>
49062+#include <linux/interrupt.h>
49063+#include <linux/irqreturn.h>
49064 #include <net/ip.h>
49065
49066 #include "ftgmac100.h"
49067diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
49068index dce5f7b..2433466 100644
49069--- a/drivers/net/ethernet/faraday/ftmac100.c
49070+++ b/drivers/net/ethernet/faraday/ftmac100.c
49071@@ -31,6 +31,8 @@
49072 #include <linux/module.h>
49073 #include <linux/netdevice.h>
49074 #include <linux/platform_device.h>
49075+#include <linux/interrupt.h>
49076+#include <linux/irqreturn.h>
49077
49078 #include "ftmac100.h"
49079
49080diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49081index fabcfa1..188fd22 100644
49082--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49083+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
49084@@ -419,7 +419,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
49085 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
49086
49087 /* Update the base adjustement value. */
49088- ACCESS_ONCE(pf->ptp_base_adj) = incval;
49089+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
49090 smp_mb(); /* Force the above update. */
49091 }
49092
49093diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49094index 79c00f5..8da39f6 100644
49095--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49096+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
49097@@ -785,7 +785,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
49098 }
49099
49100 /* update the base incval used to calculate frequency adjustment */
49101- ACCESS_ONCE(adapter->base_incval) = incval;
49102+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
49103 smp_mb();
49104
49105 /* need lock to prevent incorrect read while modifying cyclecounter */
49106diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
49107index 35dd887..38b3476 100644
49108--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
49109+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
49110@@ -475,8 +475,8 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
49111 wmb();
49112
49113 /* we want to dirty this cache line once */
49114- ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
49115- ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
49116+ ACCESS_ONCE_RW(ring->last_nr_txbb) = last_nr_txbb;
49117+ ACCESS_ONCE_RW(ring->cons) = ring_cons + txbbs_skipped;
49118
49119 netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
49120
49121diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
49122index 6223930..975033d 100644
49123--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
49124+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
49125@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
49126 struct __vxge_hw_fifo *fifo;
49127 struct vxge_hw_fifo_config *config;
49128 u32 txdl_size, txdl_per_memblock;
49129- struct vxge_hw_mempool_cbs fifo_mp_callback;
49130+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
49131+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
49132+ };
49133+
49134 struct __vxge_hw_virtualpath *vpath;
49135
49136 if ((vp == NULL) || (attr == NULL)) {
49137@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
49138 goto exit;
49139 }
49140
49141- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
49142-
49143 fifo->mempool =
49144 __vxge_hw_mempool_create(vpath->hldev,
49145 fifo->config->memblock_size,
49146diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49147index 2bb48d5..d1a865d 100644
49148--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49149+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
49150@@ -2324,7 +2324,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
49151 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
49152 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
49153 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
49154- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
49155+ pax_open_kernel();
49156+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
49157+ pax_close_kernel();
49158 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49159 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
49160 max_tx_rings = QLCNIC_MAX_TX_RINGS;
49161diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49162index be7d7a6..a8983f8 100644
49163--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49164+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
49165@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
49166 case QLCNIC_NON_PRIV_FUNC:
49167 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
49168 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49169- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
49170+ pax_open_kernel();
49171+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
49172+ pax_close_kernel();
49173 break;
49174 case QLCNIC_PRIV_FUNC:
49175 ahw->op_mode = QLCNIC_PRIV_FUNC;
49176 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
49177- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
49178+ pax_open_kernel();
49179+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
49180+ pax_close_kernel();
49181 break;
49182 case QLCNIC_MGMT_FUNC:
49183 ahw->op_mode = QLCNIC_MGMT_FUNC;
49184 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
49185- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
49186+ pax_open_kernel();
49187+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
49188+ pax_close_kernel();
49189 break;
49190 default:
49191 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
49192diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49193index 332bb8a..e6adcd1 100644
49194--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49195+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
49196@@ -1285,7 +1285,7 @@ flash_temp:
49197 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
49198 {
49199 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
49200- static const struct qlcnic_dump_operations *fw_dump_ops;
49201+ const struct qlcnic_dump_operations *fw_dump_ops;
49202 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
49203 u32 entry_offset, dump, no_entries, buf_offset = 0;
49204 int i, k, ops_cnt, ops_index, dump_size = 0;
49205diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
49206index c70ab40..00b28e0 100644
49207--- a/drivers/net/ethernet/realtek/r8169.c
49208+++ b/drivers/net/ethernet/realtek/r8169.c
49209@@ -788,22 +788,22 @@ struct rtl8169_private {
49210 struct mdio_ops {
49211 void (*write)(struct rtl8169_private *, int, int);
49212 int (*read)(struct rtl8169_private *, int);
49213- } mdio_ops;
49214+ } __no_const mdio_ops;
49215
49216 struct pll_power_ops {
49217 void (*down)(struct rtl8169_private *);
49218 void (*up)(struct rtl8169_private *);
49219- } pll_power_ops;
49220+ } __no_const pll_power_ops;
49221
49222 struct jumbo_ops {
49223 void (*enable)(struct rtl8169_private *);
49224 void (*disable)(struct rtl8169_private *);
49225- } jumbo_ops;
49226+ } __no_const jumbo_ops;
49227
49228 struct csi_ops {
49229 void (*write)(struct rtl8169_private *, int, int);
49230 u32 (*read)(struct rtl8169_private *, int);
49231- } csi_ops;
49232+ } __no_const csi_ops;
49233
49234 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
49235 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
49236diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
49237index 6b861e3..204ac86 100644
49238--- a/drivers/net/ethernet/sfc/ptp.c
49239+++ b/drivers/net/ethernet/sfc/ptp.c
49240@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
49241 ptp->start.dma_addr);
49242
49243 /* Clear flag that signals MC ready */
49244- ACCESS_ONCE(*start) = 0;
49245+ ACCESS_ONCE_RW(*start) = 0;
49246 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
49247 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
49248 EFX_BUG_ON_PARANOID(rc);
49249diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
49250index 10b6173..b605dfd5 100644
49251--- a/drivers/net/ethernet/sfc/selftest.c
49252+++ b/drivers/net/ethernet/sfc/selftest.c
49253@@ -46,7 +46,7 @@ struct efx_loopback_payload {
49254 struct iphdr ip;
49255 struct udphdr udp;
49256 __be16 iteration;
49257- const char msg[64];
49258+ char msg[64];
49259 } __packed;
49260
49261 /* Loopback test source MAC address */
49262diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49263index 08c483b..2c4a553 100644
49264--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49265+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
49266@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
49267
49268 writel(value, ioaddr + MMC_CNTRL);
49269
49270- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49271- MMC_CNTRL, value);
49272+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
49273+// MMC_CNTRL, value);
49274 }
49275
49276 /* To mask all all interrupts.*/
49277diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
49278index 17e2766..c332f1e 100644
49279--- a/drivers/net/ethernet/via/via-rhine.c
49280+++ b/drivers/net/ethernet/via/via-rhine.c
49281@@ -2514,7 +2514,7 @@ static struct platform_driver rhine_driver_platform = {
49282 }
49283 };
49284
49285-static struct dmi_system_id rhine_dmi_table[] __initdata = {
49286+static const struct dmi_system_id rhine_dmi_table[] __initconst = {
49287 {
49288 .ident = "EPIA-M",
49289 .matches = {
49290diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
49291index 384ca4f..dd7d4f9 100644
49292--- a/drivers/net/hyperv/hyperv_net.h
49293+++ b/drivers/net/hyperv/hyperv_net.h
49294@@ -171,7 +171,7 @@ struct rndis_device {
49295 enum rndis_device_state state;
49296 bool link_state;
49297 bool link_change;
49298- atomic_t new_req_id;
49299+ atomic_unchecked_t new_req_id;
49300
49301 spinlock_t request_lock;
49302 struct list_head req_list;
49303diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
49304index 7816d98..7890614 100644
49305--- a/drivers/net/hyperv/rndis_filter.c
49306+++ b/drivers/net/hyperv/rndis_filter.c
49307@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
49308 * template
49309 */
49310 set = &rndis_msg->msg.set_req;
49311- set->req_id = atomic_inc_return(&dev->new_req_id);
49312+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49313
49314 /* Add to the request list */
49315 spin_lock_irqsave(&dev->request_lock, flags);
49316@@ -918,7 +918,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
49317
49318 /* Setup the rndis set */
49319 halt = &request->request_msg.msg.halt_req;
49320- halt->req_id = atomic_inc_return(&dev->new_req_id);
49321+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
49322
49323 /* Ignore return since this msg is optional. */
49324 rndis_filter_send_request(dev, request);
49325diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
49326index 34f846b..4a0d5b1 100644
49327--- a/drivers/net/ifb.c
49328+++ b/drivers/net/ifb.c
49329@@ -253,7 +253,7 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
49330 return 0;
49331 }
49332
49333-static struct rtnl_link_ops ifb_link_ops __read_mostly = {
49334+static struct rtnl_link_ops ifb_link_ops = {
49335 .kind = "ifb",
49336 .priv_size = sizeof(struct ifb_private),
49337 .setup = ifb_setup,
49338diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
49339index 1df38bd..4bc20b0 100644
49340--- a/drivers/net/macvlan.c
49341+++ b/drivers/net/macvlan.c
49342@@ -335,7 +335,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
49343 free_nskb:
49344 kfree_skb(nskb);
49345 err:
49346- atomic_long_inc(&skb->dev->rx_dropped);
49347+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
49348 }
49349
49350 static void macvlan_flush_sources(struct macvlan_port *port,
49351@@ -1459,13 +1459,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
49352 int macvlan_link_register(struct rtnl_link_ops *ops)
49353 {
49354 /* common fields */
49355- ops->priv_size = sizeof(struct macvlan_dev);
49356- ops->validate = macvlan_validate;
49357- ops->maxtype = IFLA_MACVLAN_MAX;
49358- ops->policy = macvlan_policy;
49359- ops->changelink = macvlan_changelink;
49360- ops->get_size = macvlan_get_size;
49361- ops->fill_info = macvlan_fill_info;
49362+ pax_open_kernel();
49363+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
49364+ *(void **)&ops->validate = macvlan_validate;
49365+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
49366+ *(const void **)&ops->policy = macvlan_policy;
49367+ *(void **)&ops->changelink = macvlan_changelink;
49368+ *(void **)&ops->get_size = macvlan_get_size;
49369+ *(void **)&ops->fill_info = macvlan_fill_info;
49370+ pax_close_kernel();
49371
49372 return rtnl_link_register(ops);
49373 };
49374@@ -1551,7 +1553,7 @@ static int macvlan_device_event(struct notifier_block *unused,
49375 return NOTIFY_DONE;
49376 }
49377
49378-static struct notifier_block macvlan_notifier_block __read_mostly = {
49379+static struct notifier_block macvlan_notifier_block = {
49380 .notifier_call = macvlan_device_event,
49381 };
49382
49383diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
49384index 27ecc5c..f636328 100644
49385--- a/drivers/net/macvtap.c
49386+++ b/drivers/net/macvtap.c
49387@@ -436,7 +436,7 @@ static void macvtap_setup(struct net_device *dev)
49388 dev->tx_queue_len = TUN_READQ_SIZE;
49389 }
49390
49391-static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
49392+static struct rtnl_link_ops macvtap_link_ops = {
49393 .kind = "macvtap",
49394 .setup = macvtap_setup,
49395 .newlink = macvtap_newlink,
49396@@ -1033,7 +1033,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
49397
49398 ret = 0;
49399 u = q->flags;
49400- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49401+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
49402 put_user(u, &ifr->ifr_flags))
49403 ret = -EFAULT;
49404 macvtap_put_vlan(vlan);
49405@@ -1217,7 +1217,7 @@ static int macvtap_device_event(struct notifier_block *unused,
49406 return NOTIFY_DONE;
49407 }
49408
49409-static struct notifier_block macvtap_notifier_block __read_mostly = {
49410+static struct notifier_block macvtap_notifier_block = {
49411 .notifier_call = macvtap_device_event,
49412 };
49413
49414diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
49415index 34924df..a747360 100644
49416--- a/drivers/net/nlmon.c
49417+++ b/drivers/net/nlmon.c
49418@@ -154,7 +154,7 @@ static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
49419 return 0;
49420 }
49421
49422-static struct rtnl_link_ops nlmon_link_ops __read_mostly = {
49423+static struct rtnl_link_ops nlmon_link_ops = {
49424 .kind = "nlmon",
49425 .priv_size = sizeof(struct nlmon),
49426 .setup = nlmon_setup,
49427diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
49428index d551df6..fa4c2df 100644
49429--- a/drivers/net/phy/phy_device.c
49430+++ b/drivers/net/phy/phy_device.c
49431@@ -218,7 +218,7 @@ EXPORT_SYMBOL(phy_device_create);
49432 * zero on success.
49433 *
49434 */
49435-static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
49436+static int get_phy_c45_ids(struct mii_bus *bus, int addr, int *phy_id,
49437 struct phy_c45_device_ids *c45_ids) {
49438 int phy_reg;
49439 int i, reg_addr;
49440@@ -288,7 +288,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
49441 * its return value is in turn returned.
49442 *
49443 */
49444-static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
49445+static int get_phy_id(struct mii_bus *bus, int addr, int *phy_id,
49446 bool is_c45, struct phy_c45_device_ids *c45_ids)
49447 {
49448 int phy_reg;
49449@@ -326,7 +326,7 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
49450 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
49451 {
49452 struct phy_c45_device_ids c45_ids = {0};
49453- u32 phy_id = 0;
49454+ int phy_id = 0;
49455 int r;
49456
49457 r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
49458diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
49459index 9d15566..5ad4ef6 100644
49460--- a/drivers/net/ppp/ppp_generic.c
49461+++ b/drivers/net/ppp/ppp_generic.c
49462@@ -1022,7 +1022,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
49463 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
49464 struct ppp_stats stats;
49465 struct ppp_comp_stats cstats;
49466- char *vers;
49467
49468 switch (cmd) {
49469 case SIOCGPPPSTATS:
49470@@ -1044,8 +1043,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
49471 break;
49472
49473 case SIOCGPPPVER:
49474- vers = PPP_VERSION;
49475- if (copy_to_user(addr, vers, strlen(vers) + 1))
49476+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
49477 break;
49478 err = 0;
49479 break;
49480diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
49481index 079f7ad..b2a2bfa7 100644
49482--- a/drivers/net/slip/slhc.c
49483+++ b/drivers/net/slip/slhc.c
49484@@ -487,7 +487,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
49485 register struct tcphdr *thp;
49486 register struct iphdr *ip;
49487 register struct cstate *cs;
49488- int len, hdrlen;
49489+ long len, hdrlen;
49490 unsigned char *cp = icp;
49491
49492 /* We've got a compressed packet; read the change byte */
49493diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
49494index 7d39484..d58499d 100644
49495--- a/drivers/net/team/team.c
49496+++ b/drivers/net/team/team.c
49497@@ -2099,7 +2099,7 @@ static unsigned int team_get_num_rx_queues(void)
49498 return TEAM_DEFAULT_NUM_RX_QUEUES;
49499 }
49500
49501-static struct rtnl_link_ops team_link_ops __read_mostly = {
49502+static struct rtnl_link_ops team_link_ops = {
49503 .kind = DRV_NAME,
49504 .priv_size = sizeof(struct team),
49505 .setup = team_setup,
49506@@ -2889,7 +2889,7 @@ static int team_device_event(struct notifier_block *unused,
49507 return NOTIFY_DONE;
49508 }
49509
49510-static struct notifier_block team_notifier_block __read_mostly = {
49511+static struct notifier_block team_notifier_block = {
49512 .notifier_call = team_device_event,
49513 };
49514
49515diff --git a/drivers/net/tun.c b/drivers/net/tun.c
49516index 857dca4..642f532 100644
49517--- a/drivers/net/tun.c
49518+++ b/drivers/net/tun.c
49519@@ -1421,7 +1421,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
49520 return -EINVAL;
49521 }
49522
49523-static struct rtnl_link_ops tun_link_ops __read_mostly = {
49524+static struct rtnl_link_ops tun_link_ops = {
49525 .kind = DRV_NAME,
49526 .priv_size = sizeof(struct tun_struct),
49527 .setup = tun_setup,
49528@@ -1830,7 +1830,7 @@ unlock:
49529 }
49530
49531 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
49532- unsigned long arg, int ifreq_len)
49533+ unsigned long arg, size_t ifreq_len)
49534 {
49535 struct tun_file *tfile = file->private_data;
49536 struct tun_struct *tun;
49537@@ -1844,6 +1844,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
49538 int le;
49539 int ret;
49540
49541+ if (ifreq_len > sizeof ifr)
49542+ return -EFAULT;
49543+
49544 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
49545 if (copy_from_user(&ifr, argp, ifreq_len))
49546 return -EFAULT;
49547diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
49548index 778e915..58c4d95 100644
49549--- a/drivers/net/usb/hso.c
49550+++ b/drivers/net/usb/hso.c
49551@@ -70,7 +70,7 @@
49552 #include <asm/byteorder.h>
49553 #include <linux/serial_core.h>
49554 #include <linux/serial.h>
49555-
49556+#include <asm/local.h>
49557
49558 #define MOD_AUTHOR "Option Wireless"
49559 #define MOD_DESCRIPTION "USB High Speed Option driver"
49560@@ -1183,7 +1183,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
49561 struct urb *urb;
49562
49563 urb = serial->rx_urb[0];
49564- if (serial->port.count > 0) {
49565+ if (atomic_read(&serial->port.count) > 0) {
49566 count = put_rxbuf_data(urb, serial);
49567 if (count == -1)
49568 return;
49569@@ -1221,7 +1221,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
49570 DUMP1(urb->transfer_buffer, urb->actual_length);
49571
49572 /* Anyone listening? */
49573- if (serial->port.count == 0)
49574+ if (atomic_read(&serial->port.count) == 0)
49575 return;
49576
49577 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
49578@@ -1282,8 +1282,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
49579 tty_port_tty_set(&serial->port, tty);
49580
49581 /* check for port already opened, if not set the termios */
49582- serial->port.count++;
49583- if (serial->port.count == 1) {
49584+ if (atomic_inc_return(&serial->port.count) == 1) {
49585 serial->rx_state = RX_IDLE;
49586 /* Force default termio settings */
49587 _hso_serial_set_termios(tty, NULL);
49588@@ -1293,7 +1292,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
49589 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
49590 if (result) {
49591 hso_stop_serial_device(serial->parent);
49592- serial->port.count--;
49593+ atomic_dec(&serial->port.count);
49594 } else {
49595 kref_get(&serial->parent->ref);
49596 }
49597@@ -1331,10 +1330,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
49598
49599 /* reset the rts and dtr */
49600 /* do the actual close */
49601- serial->port.count--;
49602+ atomic_dec(&serial->port.count);
49603
49604- if (serial->port.count <= 0) {
49605- serial->port.count = 0;
49606+ if (atomic_read(&serial->port.count) <= 0) {
49607+ atomic_set(&serial->port.count, 0);
49608 tty_port_tty_set(&serial->port, NULL);
49609 if (!usb_gone)
49610 hso_stop_serial_device(serial->parent);
49611@@ -1417,7 +1416,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
49612
49613 /* the actual setup */
49614 spin_lock_irqsave(&serial->serial_lock, flags);
49615- if (serial->port.count)
49616+ if (atomic_read(&serial->port.count))
49617 _hso_serial_set_termios(tty, old);
49618 else
49619 tty->termios = *old;
49620@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
49621 D1("Pending read interrupt on port %d\n", i);
49622 spin_lock(&serial->serial_lock);
49623 if (serial->rx_state == RX_IDLE &&
49624- serial->port.count > 0) {
49625+ atomic_read(&serial->port.count) > 0) {
49626 /* Setup and send a ctrl req read on
49627 * port i */
49628 if (!serial->rx_urb_filled[0]) {
49629@@ -3053,7 +3052,7 @@ static int hso_resume(struct usb_interface *iface)
49630 /* Start all serial ports */
49631 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
49632 if (serial_table[i] && (serial_table[i]->interface == iface)) {
49633- if (dev2ser(serial_table[i])->port.count) {
49634+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
49635 result =
49636 hso_start_serial_device(serial_table[i], GFP_NOIO);
49637 hso_kick_transmit(dev2ser(serial_table[i]));
49638diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
49639index 9f7c0ab..1577b4a 100644
49640--- a/drivers/net/usb/r8152.c
49641+++ b/drivers/net/usb/r8152.c
49642@@ -601,7 +601,7 @@ struct r8152 {
49643 void (*unload)(struct r8152 *);
49644 int (*eee_get)(struct r8152 *, struct ethtool_eee *);
49645 int (*eee_set)(struct r8152 *, struct ethtool_eee *);
49646- } rtl_ops;
49647+ } __no_const rtl_ops;
49648
49649 int intr_interval;
49650 u32 saved_wolopts;
49651diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
49652index a2515887..6d13233 100644
49653--- a/drivers/net/usb/sierra_net.c
49654+++ b/drivers/net/usb/sierra_net.c
49655@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
49656 /* atomic counter partially included in MAC address to make sure 2 devices
49657 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
49658 */
49659-static atomic_t iface_counter = ATOMIC_INIT(0);
49660+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
49661
49662 /*
49663 * SYNC Timer Delay definition used to set the expiry time
49664@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
49665 dev->net->netdev_ops = &sierra_net_device_ops;
49666
49667 /* change MAC addr to include, ifacenum, and to be unique */
49668- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
49669+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
49670 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
49671
49672 /* we will have to manufacture ethernet headers, prepare template */
49673diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
49674index 777757a..395a767 100644
49675--- a/drivers/net/usb/usbnet.c
49676+++ b/drivers/net/usb/usbnet.c
49677@@ -1285,7 +1285,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
49678 struct net_device *net)
49679 {
49680 struct usbnet *dev = netdev_priv(net);
49681- int length;
49682+ unsigned int length;
49683 struct urb *urb = NULL;
49684 struct skb_data *entry;
49685 struct driver_info *info = dev->driver_info;
49686@@ -1413,7 +1413,7 @@ not_drop:
49687 }
49688 } else
49689 netif_dbg(dev, tx_queued, dev->net,
49690- "> tx, len %d, type 0x%x\n", length, skb->protocol);
49691+ "> tx, len %u, type 0x%x\n", length, skb->protocol);
49692 #ifdef CONFIG_PM
49693 deferred:
49694 #endif
49695diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
49696index 59b0e97..a6ed579 100644
49697--- a/drivers/net/virtio_net.c
49698+++ b/drivers/net/virtio_net.c
49699@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
49700 #define RECEIVE_AVG_WEIGHT 64
49701
49702 /* Minimum alignment for mergeable packet buffers. */
49703-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
49704+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
49705
49706 #define VIRTNET_DRIVER_VERSION "1.0.0"
49707
49708diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
49709index fceb637..37c70fd 100644
49710--- a/drivers/net/vxlan.c
49711+++ b/drivers/net/vxlan.c
49712@@ -2935,7 +2935,7 @@ static struct net *vxlan_get_link_net(const struct net_device *dev)
49713 return vxlan->net;
49714 }
49715
49716-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
49717+static struct rtnl_link_ops vxlan_link_ops = {
49718 .kind = "vxlan",
49719 .maxtype = IFLA_VXLAN_MAX,
49720 .policy = vxlan_policy,
49721@@ -2983,7 +2983,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
49722 return NOTIFY_DONE;
49723 }
49724
49725-static struct notifier_block vxlan_notifier_block __read_mostly = {
49726+static struct notifier_block vxlan_notifier_block = {
49727 .notifier_call = vxlan_lowerdev_event,
49728 };
49729
49730diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
49731index 5920c99..ff2e4a5 100644
49732--- a/drivers/net/wan/lmc/lmc_media.c
49733+++ b/drivers/net/wan/lmc/lmc_media.c
49734@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
49735 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
49736
49737 lmc_media_t lmc_ds3_media = {
49738- lmc_ds3_init, /* special media init stuff */
49739- lmc_ds3_default, /* reset to default state */
49740- lmc_ds3_set_status, /* reset status to state provided */
49741- lmc_dummy_set_1, /* set clock source */
49742- lmc_dummy_set2_1, /* set line speed */
49743- lmc_ds3_set_100ft, /* set cable length */
49744- lmc_ds3_set_scram, /* set scrambler */
49745- lmc_ds3_get_link_status, /* get link status */
49746- lmc_dummy_set_1, /* set link status */
49747- lmc_ds3_set_crc_length, /* set CRC length */
49748- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49749- lmc_ds3_watchdog
49750+ .init = lmc_ds3_init, /* special media init stuff */
49751+ .defaults = lmc_ds3_default, /* reset to default state */
49752+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
49753+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
49754+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49755+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
49756+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
49757+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
49758+ .set_link_status = lmc_dummy_set_1, /* set link status */
49759+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
49760+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49761+ .watchdog = lmc_ds3_watchdog
49762 };
49763
49764 lmc_media_t lmc_hssi_media = {
49765- lmc_hssi_init, /* special media init stuff */
49766- lmc_hssi_default, /* reset to default state */
49767- lmc_hssi_set_status, /* reset status to state provided */
49768- lmc_hssi_set_clock, /* set clock source */
49769- lmc_dummy_set2_1, /* set line speed */
49770- lmc_dummy_set_1, /* set cable length */
49771- lmc_dummy_set_1, /* set scrambler */
49772- lmc_hssi_get_link_status, /* get link status */
49773- lmc_hssi_set_link_status, /* set link status */
49774- lmc_hssi_set_crc_length, /* set CRC length */
49775- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49776- lmc_hssi_watchdog
49777+ .init = lmc_hssi_init, /* special media init stuff */
49778+ .defaults = lmc_hssi_default, /* reset to default state */
49779+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
49780+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
49781+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49782+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49783+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49784+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
49785+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
49786+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
49787+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49788+ .watchdog = lmc_hssi_watchdog
49789 };
49790
49791-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
49792- lmc_ssi_default, /* reset to default state */
49793- lmc_ssi_set_status, /* reset status to state provided */
49794- lmc_ssi_set_clock, /* set clock source */
49795- lmc_ssi_set_speed, /* set line speed */
49796- lmc_dummy_set_1, /* set cable length */
49797- lmc_dummy_set_1, /* set scrambler */
49798- lmc_ssi_get_link_status, /* get link status */
49799- lmc_ssi_set_link_status, /* set link status */
49800- lmc_ssi_set_crc_length, /* set CRC length */
49801- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49802- lmc_ssi_watchdog
49803+lmc_media_t lmc_ssi_media = {
49804+ .init = lmc_ssi_init, /* special media init stuff */
49805+ .defaults = lmc_ssi_default, /* reset to default state */
49806+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
49807+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
49808+ .set_speed = lmc_ssi_set_speed, /* set line speed */
49809+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49810+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49811+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
49812+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
49813+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
49814+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49815+ .watchdog = lmc_ssi_watchdog
49816 };
49817
49818 lmc_media_t lmc_t1_media = {
49819- lmc_t1_init, /* special media init stuff */
49820- lmc_t1_default, /* reset to default state */
49821- lmc_t1_set_status, /* reset status to state provided */
49822- lmc_t1_set_clock, /* set clock source */
49823- lmc_dummy_set2_1, /* set line speed */
49824- lmc_dummy_set_1, /* set cable length */
49825- lmc_dummy_set_1, /* set scrambler */
49826- lmc_t1_get_link_status, /* get link status */
49827- lmc_dummy_set_1, /* set link status */
49828- lmc_t1_set_crc_length, /* set CRC length */
49829- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
49830- lmc_t1_watchdog
49831+ .init = lmc_t1_init, /* special media init stuff */
49832+ .defaults = lmc_t1_default, /* reset to default state */
49833+ .set_status = lmc_t1_set_status, /* reset status to state provided */
49834+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
49835+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49836+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49837+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49838+ .get_link_status = lmc_t1_get_link_status, /* get link status */
49839+ .set_link_status = lmc_dummy_set_1, /* set link status */
49840+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
49841+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
49842+ .watchdog = lmc_t1_watchdog
49843 };
49844
49845 static void
49846diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
49847index feacc3b..5bac0de 100644
49848--- a/drivers/net/wan/z85230.c
49849+++ b/drivers/net/wan/z85230.c
49850@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
49851
49852 struct z8530_irqhandler z8530_sync =
49853 {
49854- z8530_rx,
49855- z8530_tx,
49856- z8530_status
49857+ .rx = z8530_rx,
49858+ .tx = z8530_tx,
49859+ .status = z8530_status
49860 };
49861
49862 EXPORT_SYMBOL(z8530_sync);
49863@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
49864 }
49865
49866 static struct z8530_irqhandler z8530_dma_sync = {
49867- z8530_dma_rx,
49868- z8530_dma_tx,
49869- z8530_dma_status
49870+ .rx = z8530_dma_rx,
49871+ .tx = z8530_dma_tx,
49872+ .status = z8530_dma_status
49873 };
49874
49875 static struct z8530_irqhandler z8530_txdma_sync = {
49876- z8530_rx,
49877- z8530_dma_tx,
49878- z8530_dma_status
49879+ .rx = z8530_rx,
49880+ .tx = z8530_dma_tx,
49881+ .status = z8530_dma_status
49882 };
49883
49884 /**
49885@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
49886
49887 struct z8530_irqhandler z8530_nop=
49888 {
49889- z8530_rx_clear,
49890- z8530_tx_clear,
49891- z8530_status_clear
49892+ .rx = z8530_rx_clear,
49893+ .tx = z8530_tx_clear,
49894+ .status = z8530_status_clear
49895 };
49896
49897
49898diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
49899index 0b60295..b8bfa5b 100644
49900--- a/drivers/net/wimax/i2400m/rx.c
49901+++ b/drivers/net/wimax/i2400m/rx.c
49902@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
49903 if (i2400m->rx_roq == NULL)
49904 goto error_roq_alloc;
49905
49906- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
49907+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
49908 GFP_KERNEL);
49909 if (rd == NULL) {
49910 result = -ENOMEM;
49911diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
49912index e71a2ce..2268d61 100644
49913--- a/drivers/net/wireless/airo.c
49914+++ b/drivers/net/wireless/airo.c
49915@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
49916 struct airo_info *ai = dev->ml_priv;
49917 int ridcode;
49918 int enabled;
49919- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
49920+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
49921 unsigned char *iobuf;
49922
49923 /* Only super-user can write RIDs */
49924diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
49925index da92bfa..5a9001a 100644
49926--- a/drivers/net/wireless/at76c50x-usb.c
49927+++ b/drivers/net/wireless/at76c50x-usb.c
49928@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
49929 }
49930
49931 /* Convert timeout from the DFU status to jiffies */
49932-static inline unsigned long at76_get_timeout(struct dfu_status *s)
49933+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
49934 {
49935 return msecs_to_jiffies((s->poll_timeout[2] << 16)
49936 | (s->poll_timeout[1] << 8)
49937diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
49938index 2fd9e18..3f55bdd 100644
49939--- a/drivers/net/wireless/ath/ath10k/htc.c
49940+++ b/drivers/net/wireless/ath/ath10k/htc.c
49941@@ -849,7 +849,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
49942 /* registered target arrival callback from the HIF layer */
49943 int ath10k_htc_init(struct ath10k *ar)
49944 {
49945- struct ath10k_hif_cb htc_callbacks;
49946+ static struct ath10k_hif_cb htc_callbacks = {
49947+ .rx_completion = ath10k_htc_rx_completion_handler,
49948+ .tx_completion = ath10k_htc_tx_completion_handler,
49949+ };
49950 struct ath10k_htc_ep *ep = NULL;
49951 struct ath10k_htc *htc = &ar->htc;
49952
49953@@ -858,8 +861,6 @@ int ath10k_htc_init(struct ath10k *ar)
49954 ath10k_htc_reset_endpoint_states(htc);
49955
49956 /* setup HIF layer callbacks */
49957- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
49958- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
49959 htc->ar = ar;
49960
49961 /* Get HIF default pipe for HTC message exchange */
49962diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
49963index 527179c..a890150 100644
49964--- a/drivers/net/wireless/ath/ath10k/htc.h
49965+++ b/drivers/net/wireless/ath/ath10k/htc.h
49966@@ -270,13 +270,13 @@ enum ath10k_htc_ep_id {
49967
49968 struct ath10k_htc_ops {
49969 void (*target_send_suspend_complete)(struct ath10k *ar);
49970-};
49971+} __no_const;
49972
49973 struct ath10k_htc_ep_ops {
49974 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
49975 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
49976 void (*ep_tx_credits)(struct ath10k *);
49977-};
49978+} __no_const;
49979
49980 /* service connection information */
49981 struct ath10k_htc_svc_conn_req {
49982diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
49983index f816909..e56cd8b 100644
49984--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
49985+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
49986@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49987 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
49988 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
49989
49990- ACCESS_ONCE(ads->ds_link) = i->link;
49991- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
49992+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
49993+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
49994
49995 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
49996 ctl6 = SM(i->keytype, AR_EncrType);
49997@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49998
49999 if ((i->is_first || i->is_last) &&
50000 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
50001- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
50002+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
50003 | set11nTries(i->rates, 1)
50004 | set11nTries(i->rates, 2)
50005 | set11nTries(i->rates, 3)
50006 | (i->dur_update ? AR_DurUpdateEna : 0)
50007 | SM(0, AR_BurstDur);
50008
50009- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
50010+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
50011 | set11nRate(i->rates, 1)
50012 | set11nRate(i->rates, 2)
50013 | set11nRate(i->rates, 3);
50014 } else {
50015- ACCESS_ONCE(ads->ds_ctl2) = 0;
50016- ACCESS_ONCE(ads->ds_ctl3) = 0;
50017+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
50018+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
50019 }
50020
50021 if (!i->is_first) {
50022- ACCESS_ONCE(ads->ds_ctl0) = 0;
50023- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
50024- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
50025+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
50026+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
50027+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
50028 return;
50029 }
50030
50031@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50032 break;
50033 }
50034
50035- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
50036+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
50037 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
50038 | SM(i->txpower[0], AR_XmitPower0)
50039 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
50040@@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50041 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
50042 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
50043
50044- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
50045- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
50046+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
50047+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
50048
50049 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
50050 return;
50051
50052- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
50053+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
50054 | set11nPktDurRTSCTS(i->rates, 1);
50055
50056- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
50057+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
50058 | set11nPktDurRTSCTS(i->rates, 3);
50059
50060- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
50061+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
50062 | set11nRateFlags(i->rates, 1)
50063 | set11nRateFlags(i->rates, 2)
50064 | set11nRateFlags(i->rates, 3)
50065 | SM(i->rtscts_rate, AR_RTSCTSRate);
50066
50067- ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
50068- ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
50069- ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
50070+ ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
50071+ ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
50072+ ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
50073 }
50074
50075 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
50076diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50077index da84b70..83e4978 100644
50078--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50079+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
50080@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50081 (i->qcu << AR_TxQcuNum_S) | desc_len;
50082
50083 checksum += val;
50084- ACCESS_ONCE(ads->info) = val;
50085+ ACCESS_ONCE_RW(ads->info) = val;
50086
50087 checksum += i->link;
50088- ACCESS_ONCE(ads->link) = i->link;
50089+ ACCESS_ONCE_RW(ads->link) = i->link;
50090
50091 checksum += i->buf_addr[0];
50092- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
50093+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
50094 checksum += i->buf_addr[1];
50095- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
50096+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
50097 checksum += i->buf_addr[2];
50098- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
50099+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
50100 checksum += i->buf_addr[3];
50101- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
50102+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
50103
50104 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
50105- ACCESS_ONCE(ads->ctl3) = val;
50106+ ACCESS_ONCE_RW(ads->ctl3) = val;
50107 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
50108- ACCESS_ONCE(ads->ctl5) = val;
50109+ ACCESS_ONCE_RW(ads->ctl5) = val;
50110 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
50111- ACCESS_ONCE(ads->ctl7) = val;
50112+ ACCESS_ONCE_RW(ads->ctl7) = val;
50113 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
50114- ACCESS_ONCE(ads->ctl9) = val;
50115+ ACCESS_ONCE_RW(ads->ctl9) = val;
50116
50117 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
50118- ACCESS_ONCE(ads->ctl10) = checksum;
50119+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
50120
50121 if (i->is_first || i->is_last) {
50122- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
50123+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
50124 | set11nTries(i->rates, 1)
50125 | set11nTries(i->rates, 2)
50126 | set11nTries(i->rates, 3)
50127 | (i->dur_update ? AR_DurUpdateEna : 0)
50128 | SM(0, AR_BurstDur);
50129
50130- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
50131+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
50132 | set11nRate(i->rates, 1)
50133 | set11nRate(i->rates, 2)
50134 | set11nRate(i->rates, 3);
50135 } else {
50136- ACCESS_ONCE(ads->ctl13) = 0;
50137- ACCESS_ONCE(ads->ctl14) = 0;
50138+ ACCESS_ONCE_RW(ads->ctl13) = 0;
50139+ ACCESS_ONCE_RW(ads->ctl14) = 0;
50140 }
50141
50142 ads->ctl20 = 0;
50143@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50144
50145 ctl17 = SM(i->keytype, AR_EncrType);
50146 if (!i->is_first) {
50147- ACCESS_ONCE(ads->ctl11) = 0;
50148- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
50149- ACCESS_ONCE(ads->ctl15) = 0;
50150- ACCESS_ONCE(ads->ctl16) = 0;
50151- ACCESS_ONCE(ads->ctl17) = ctl17;
50152- ACCESS_ONCE(ads->ctl18) = 0;
50153- ACCESS_ONCE(ads->ctl19) = 0;
50154+ ACCESS_ONCE_RW(ads->ctl11) = 0;
50155+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
50156+ ACCESS_ONCE_RW(ads->ctl15) = 0;
50157+ ACCESS_ONCE_RW(ads->ctl16) = 0;
50158+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
50159+ ACCESS_ONCE_RW(ads->ctl18) = 0;
50160+ ACCESS_ONCE_RW(ads->ctl19) = 0;
50161 return;
50162 }
50163
50164- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
50165+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
50166 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
50167 | SM(i->txpower[0], AR_XmitPower0)
50168 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
50169@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
50170 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
50171 ctl12 |= SM(val, AR_PAPRDChainMask);
50172
50173- ACCESS_ONCE(ads->ctl12) = ctl12;
50174- ACCESS_ONCE(ads->ctl17) = ctl17;
50175+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
50176+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
50177
50178- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
50179+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
50180 | set11nPktDurRTSCTS(i->rates, 1);
50181
50182- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
50183+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
50184 | set11nPktDurRTSCTS(i->rates, 3);
50185
50186- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
50187+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
50188 | set11nRateFlags(i->rates, 1)
50189 | set11nRateFlags(i->rates, 2)
50190 | set11nRateFlags(i->rates, 3)
50191 | SM(i->rtscts_rate, AR_RTSCTSRate);
50192
50193- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
50194+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
50195
50196- ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
50197- ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
50198- ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
50199+ ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
50200+ ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
50201+ ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
50202 }
50203
50204 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
50205diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
50206index e82e570..8c3cf90 100644
50207--- a/drivers/net/wireless/ath/ath9k/hw.h
50208+++ b/drivers/net/wireless/ath/ath9k/hw.h
50209@@ -646,7 +646,7 @@ struct ath_hw_private_ops {
50210
50211 /* ANI */
50212 void (*ani_cache_ini_regs)(struct ath_hw *ah);
50213-};
50214+} __no_const;
50215
50216 /**
50217 * struct ath_spec_scan - parameters for Atheros spectral scan
50218@@ -722,7 +722,7 @@ struct ath_hw_ops {
50219 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
50220 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
50221 #endif
50222-};
50223+} __no_const;
50224
50225 struct ath_nf_limits {
50226 s16 max;
50227diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
50228index 9ede991..a8f08fb 100644
50229--- a/drivers/net/wireless/ath/ath9k/main.c
50230+++ b/drivers/net/wireless/ath/ath9k/main.c
50231@@ -2537,16 +2537,18 @@ void ath9k_fill_chanctx_ops(void)
50232 if (!ath9k_is_chanctx_enabled())
50233 return;
50234
50235- ath9k_ops.hw_scan = ath9k_hw_scan;
50236- ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
50237- ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
50238- ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
50239- ath9k_ops.add_chanctx = ath9k_add_chanctx;
50240- ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
50241- ath9k_ops.change_chanctx = ath9k_change_chanctx;
50242- ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
50243- ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
50244- ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
50245+ pax_open_kernel();
50246+ *(void **)&ath9k_ops.hw_scan = ath9k_hw_scan;
50247+ *(void **)&ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
50248+ *(void **)&ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
50249+ *(void **)&ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
50250+ *(void **)&ath9k_ops.add_chanctx = ath9k_add_chanctx;
50251+ *(void **)&ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
50252+ *(void **)&ath9k_ops.change_chanctx = ath9k_change_chanctx;
50253+ *(void **)&ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
50254+ *(void **)&ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
50255+ *(void **)&ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
50256+ pax_close_kernel();
50257 }
50258
50259 #endif
50260diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
50261index 058a9f2..d5cb1ba 100644
50262--- a/drivers/net/wireless/b43/phy_lp.c
50263+++ b/drivers/net/wireless/b43/phy_lp.c
50264@@ -2502,7 +2502,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
50265 {
50266 struct ssb_bus *bus = dev->dev->sdev->bus;
50267
50268- static const struct b206x_channel *chandata = NULL;
50269+ const struct b206x_channel *chandata = NULL;
50270 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
50271 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
50272 u16 old_comm15, scale;
50273diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
50274index e566580..2c218ca 100644
50275--- a/drivers/net/wireless/iwlegacy/3945-mac.c
50276+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
50277@@ -3631,7 +3631,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
50278 */
50279 if (il3945_mod_params.disable_hw_scan) {
50280 D_INFO("Disabling hw_scan\n");
50281- il3945_mac_ops.hw_scan = NULL;
50282+ pax_open_kernel();
50283+ *(void **)&il3945_mac_ops.hw_scan = NULL;
50284+ pax_close_kernel();
50285 }
50286
50287 D_INFO("*** LOAD DRIVER ***\n");
50288diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50289index 0ffb6ff..c0b7f0e 100644
50290--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50291+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
50292@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
50293 {
50294 struct iwl_priv *priv = file->private_data;
50295 char buf[64];
50296- int buf_size;
50297+ size_t buf_size;
50298 u32 offset, len;
50299
50300 memset(buf, 0, sizeof(buf));
50301@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
50302 struct iwl_priv *priv = file->private_data;
50303
50304 char buf[8];
50305- int buf_size;
50306+ size_t buf_size;
50307 u32 reset_flag;
50308
50309 memset(buf, 0, sizeof(buf));
50310@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
50311 {
50312 struct iwl_priv *priv = file->private_data;
50313 char buf[8];
50314- int buf_size;
50315+ size_t buf_size;
50316 int ht40;
50317
50318 memset(buf, 0, sizeof(buf));
50319@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
50320 {
50321 struct iwl_priv *priv = file->private_data;
50322 char buf[8];
50323- int buf_size;
50324+ size_t buf_size;
50325 int value;
50326
50327 memset(buf, 0, sizeof(buf));
50328@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
50329 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
50330 DEBUGFS_READ_FILE_OPS(current_sleep_command);
50331
50332-static const char *fmt_value = " %-30s %10u\n";
50333-static const char *fmt_hex = " %-30s 0x%02X\n";
50334-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
50335-static const char *fmt_header =
50336+static const char fmt_value[] = " %-30s %10u\n";
50337+static const char fmt_hex[] = " %-30s 0x%02X\n";
50338+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
50339+static const char fmt_header[] =
50340 "%-32s current cumulative delta max\n";
50341
50342 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
50343@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
50344 {
50345 struct iwl_priv *priv = file->private_data;
50346 char buf[8];
50347- int buf_size;
50348+ size_t buf_size;
50349 int clear;
50350
50351 memset(buf, 0, sizeof(buf));
50352@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
50353 {
50354 struct iwl_priv *priv = file->private_data;
50355 char buf[8];
50356- int buf_size;
50357+ size_t buf_size;
50358 int trace;
50359
50360 memset(buf, 0, sizeof(buf));
50361@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
50362 {
50363 struct iwl_priv *priv = file->private_data;
50364 char buf[8];
50365- int buf_size;
50366+ size_t buf_size;
50367 int missed;
50368
50369 memset(buf, 0, sizeof(buf));
50370@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
50371
50372 struct iwl_priv *priv = file->private_data;
50373 char buf[8];
50374- int buf_size;
50375+ size_t buf_size;
50376 int plcp;
50377
50378 memset(buf, 0, sizeof(buf));
50379@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
50380
50381 struct iwl_priv *priv = file->private_data;
50382 char buf[8];
50383- int buf_size;
50384+ size_t buf_size;
50385 int flush;
50386
50387 memset(buf, 0, sizeof(buf));
50388@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
50389
50390 struct iwl_priv *priv = file->private_data;
50391 char buf[8];
50392- int buf_size;
50393+ size_t buf_size;
50394 int rts;
50395
50396 if (!priv->cfg->ht_params)
50397@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
50398 {
50399 struct iwl_priv *priv = file->private_data;
50400 char buf[8];
50401- int buf_size;
50402+ size_t buf_size;
50403
50404 memset(buf, 0, sizeof(buf));
50405 buf_size = min(count, sizeof(buf) - 1);
50406@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
50407 struct iwl_priv *priv = file->private_data;
50408 u32 event_log_flag;
50409 char buf[8];
50410- int buf_size;
50411+ size_t buf_size;
50412
50413 /* check that the interface is up */
50414 if (!iwl_is_ready(priv))
50415@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
50416 struct iwl_priv *priv = file->private_data;
50417 char buf[8];
50418 u32 calib_disabled;
50419- int buf_size;
50420+ size_t buf_size;
50421
50422 memset(buf, 0, sizeof(buf));
50423 buf_size = min(count, sizeof(buf) - 1);
50424diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
50425index cb72edb..242b24f 100644
50426--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
50427+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
50428@@ -1837,7 +1837,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
50429 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
50430
50431 char buf[8];
50432- int buf_size;
50433+ size_t buf_size;
50434 u32 reset_flag;
50435
50436 memset(buf, 0, sizeof(buf));
50437@@ -1858,7 +1858,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
50438 {
50439 struct iwl_trans *trans = file->private_data;
50440 char buf[8];
50441- int buf_size;
50442+ size_t buf_size;
50443 int csr;
50444
50445 memset(buf, 0, sizeof(buf));
50446diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
50447index 8908be6..fe97ddd 100644
50448--- a/drivers/net/wireless/mac80211_hwsim.c
50449+++ b/drivers/net/wireless/mac80211_hwsim.c
50450@@ -3070,20 +3070,20 @@ static int __init init_mac80211_hwsim(void)
50451 if (channels < 1)
50452 return -EINVAL;
50453
50454- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
50455- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50456- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50457- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50458- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50459- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50460- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50461- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50462- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50463- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50464- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
50465- mac80211_hwsim_assign_vif_chanctx;
50466- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
50467- mac80211_hwsim_unassign_vif_chanctx;
50468+ pax_open_kernel();
50469+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
50470+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
50471+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
50472+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
50473+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
50474+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
50475+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
50476+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
50477+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
50478+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
50479+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
50480+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
50481+ pax_close_kernel();
50482
50483 spin_lock_init(&hwsim_radio_lock);
50484 INIT_LIST_HEAD(&hwsim_radios);
50485diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
50486index 60d44ce..884dd1c 100644
50487--- a/drivers/net/wireless/rndis_wlan.c
50488+++ b/drivers/net/wireless/rndis_wlan.c
50489@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
50490
50491 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
50492
50493- if (rts_threshold < 0 || rts_threshold > 2347)
50494+ if (rts_threshold > 2347)
50495 rts_threshold = 2347;
50496
50497 tmp = cpu_to_le32(rts_threshold);
50498diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
50499index 9bb398b..b0cc047 100644
50500--- a/drivers/net/wireless/rt2x00/rt2x00.h
50501+++ b/drivers/net/wireless/rt2x00/rt2x00.h
50502@@ -375,7 +375,7 @@ struct rt2x00_intf {
50503 * for hardware which doesn't support hardware
50504 * sequence counting.
50505 */
50506- atomic_t seqno;
50507+ atomic_unchecked_t seqno;
50508 };
50509
50510 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
50511diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
50512index 68b620b..92ecd9e 100644
50513--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
50514+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
50515@@ -224,9 +224,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
50516 * sequence counter given by mac80211.
50517 */
50518 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
50519- seqno = atomic_add_return(0x10, &intf->seqno);
50520+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
50521 else
50522- seqno = atomic_read(&intf->seqno);
50523+ seqno = atomic_read_unchecked(&intf->seqno);
50524
50525 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
50526 hdr->seq_ctrl |= cpu_to_le16(seqno);
50527diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
50528index b661f896..ddf7d2b 100644
50529--- a/drivers/net/wireless/ti/wl1251/sdio.c
50530+++ b/drivers/net/wireless/ti/wl1251/sdio.c
50531@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
50532
50533 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
50534
50535- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
50536- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
50537+ pax_open_kernel();
50538+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
50539+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
50540+ pax_close_kernel();
50541
50542 wl1251_info("using dedicated interrupt line");
50543 } else {
50544- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
50545- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
50546+ pax_open_kernel();
50547+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
50548+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
50549+ pax_close_kernel();
50550
50551 wl1251_info("using SDIO interrupt");
50552 }
50553diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
50554index 144d1f8..7030936 100644
50555--- a/drivers/net/wireless/ti/wl12xx/main.c
50556+++ b/drivers/net/wireless/ti/wl12xx/main.c
50557@@ -657,7 +657,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
50558 sizeof(wl->conf.mem));
50559
50560 /* read data preparation is only needed by wl127x */
50561- wl->ops->prepare_read = wl127x_prepare_read;
50562+ pax_open_kernel();
50563+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
50564+ pax_close_kernel();
50565
50566 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
50567 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
50568@@ -682,7 +684,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
50569 sizeof(wl->conf.mem));
50570
50571 /* read data preparation is only needed by wl127x */
50572- wl->ops->prepare_read = wl127x_prepare_read;
50573+ pax_open_kernel();
50574+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
50575+ pax_close_kernel();
50576
50577 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
50578 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
50579diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
50580index 717c4f5..a813aeb 100644
50581--- a/drivers/net/wireless/ti/wl18xx/main.c
50582+++ b/drivers/net/wireless/ti/wl18xx/main.c
50583@@ -1923,8 +1923,10 @@ static int wl18xx_setup(struct wl1271 *wl)
50584 }
50585
50586 if (!checksum_param) {
50587- wl18xx_ops.set_rx_csum = NULL;
50588- wl18xx_ops.init_vif = NULL;
50589+ pax_open_kernel();
50590+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
50591+ *(void **)&wl18xx_ops.init_vif = NULL;
50592+ pax_close_kernel();
50593 }
50594
50595 /* Enable 11a Band only if we have 5G antennas */
50596diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
50597index a912dc0..a8225ba 100644
50598--- a/drivers/net/wireless/zd1211rw/zd_usb.c
50599+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
50600@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
50601 {
50602 struct zd_usb *usb = urb->context;
50603 struct zd_usb_interrupt *intr = &usb->intr;
50604- int len;
50605+ unsigned int len;
50606 u16 int_num;
50607
50608 ZD_ASSERT(in_interrupt());
50609diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
50610index ce2e2cf..f81e500 100644
50611--- a/drivers/nfc/nfcwilink.c
50612+++ b/drivers/nfc/nfcwilink.c
50613@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
50614
50615 static int nfcwilink_probe(struct platform_device *pdev)
50616 {
50617- static struct nfcwilink *drv;
50618+ struct nfcwilink *drv;
50619 int rc;
50620 __u32 protocols;
50621
50622diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
50623index 24d3d24..b662ba0 100644
50624--- a/drivers/nfc/st21nfca/st21nfca.c
50625+++ b/drivers/nfc/st21nfca/st21nfca.c
50626@@ -148,14 +148,14 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
50627 ST21NFCA_DEVICE_MGNT_GATE,
50628 ST21NFCA_DEVICE_MGNT_PIPE);
50629 if (r < 0)
50630- goto free_info;
50631+ return r;
50632
50633 /* Get pipe list */
50634 r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
50635 ST21NFCA_DM_GETINFO, pipe_list, sizeof(pipe_list),
50636 &skb_pipe_list);
50637 if (r < 0)
50638- goto free_info;
50639+ return r;
50640
50641 /* Complete the existing gate_pipe table */
50642 for (i = 0; i < skb_pipe_list->len; i++) {
50643@@ -181,6 +181,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
50644 info->src_host_id != ST21NFCA_ESE_HOST_ID) {
50645 pr_err("Unexpected apdu_reader pipe on host %x\n",
50646 info->src_host_id);
50647+ kfree_skb(skb_pipe_info);
50648 continue;
50649 }
50650
50651@@ -200,6 +201,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
50652 hdev->pipes[st21nfca_gates[j].pipe].dest_host =
50653 info->src_host_id;
50654 }
50655+ kfree_skb(skb_pipe_info);
50656 }
50657
50658 /*
50659@@ -214,13 +216,12 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
50660 st21nfca_gates[i].gate,
50661 st21nfca_gates[i].pipe);
50662 if (r < 0)
50663- goto free_info;
50664+ goto free_list;
50665 }
50666 }
50667
50668 memcpy(hdev->init_data.gates, st21nfca_gates, sizeof(st21nfca_gates));
50669-free_info:
50670- kfree_skb(skb_pipe_info);
50671+free_list:
50672 kfree_skb(skb_pipe_list);
50673 return r;
50674 }
50675@@ -588,7 +589,7 @@ static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
50676 goto exit;
50677 }
50678
50679- gate = uid_skb->data;
50680+ memcpy(gate, uid_skb->data, uid_skb->len);
50681 *len = uid_skb->len;
50682 exit:
50683 kfree_skb(uid_skb);
50684diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
50685index 3a896c9..ac7b1c8 100644
50686--- a/drivers/of/fdt.c
50687+++ b/drivers/of/fdt.c
50688@@ -1118,7 +1118,9 @@ static int __init of_fdt_raw_init(void)
50689 pr_warn("fdt: not creating '/sys/firmware/fdt': CRC check failed\n");
50690 return 0;
50691 }
50692- of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
50693+ pax_open_kernel();
50694+ *(size_t *)&of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
50695+ pax_close_kernel();
50696 return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
50697 }
50698 late_initcall(of_fdt_raw_init);
50699diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
50700index d93b2b6..ae50401 100644
50701--- a/drivers/oprofile/buffer_sync.c
50702+++ b/drivers/oprofile/buffer_sync.c
50703@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
50704 if (cookie == NO_COOKIE)
50705 offset = pc;
50706 if (cookie == INVALID_COOKIE) {
50707- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
50708+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
50709 offset = pc;
50710 }
50711 if (cookie != last_cookie) {
50712@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
50713 /* add userspace sample */
50714
50715 if (!mm) {
50716- atomic_inc(&oprofile_stats.sample_lost_no_mm);
50717+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
50718 return 0;
50719 }
50720
50721 cookie = lookup_dcookie(mm, s->eip, &offset);
50722
50723 if (cookie == INVALID_COOKIE) {
50724- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
50725+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
50726 return 0;
50727 }
50728
50729@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
50730 /* ignore backtraces if failed to add a sample */
50731 if (state == sb_bt_start) {
50732 state = sb_bt_ignore;
50733- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
50734+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
50735 }
50736 }
50737 release_mm(mm);
50738diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
50739index c0cc4e7..44d4e54 100644
50740--- a/drivers/oprofile/event_buffer.c
50741+++ b/drivers/oprofile/event_buffer.c
50742@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
50743 }
50744
50745 if (buffer_pos == buffer_size) {
50746- atomic_inc(&oprofile_stats.event_lost_overflow);
50747+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
50748 return;
50749 }
50750
50751diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
50752index ed2c3ec..deda85a 100644
50753--- a/drivers/oprofile/oprof.c
50754+++ b/drivers/oprofile/oprof.c
50755@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
50756 if (oprofile_ops.switch_events())
50757 return;
50758
50759- atomic_inc(&oprofile_stats.multiplex_counter);
50760+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
50761 start_switch_worker();
50762 }
50763
50764diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
50765index ee2cfce..7f8f699 100644
50766--- a/drivers/oprofile/oprofile_files.c
50767+++ b/drivers/oprofile/oprofile_files.c
50768@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
50769
50770 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
50771
50772-static ssize_t timeout_read(struct file *file, char __user *buf,
50773+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
50774 size_t count, loff_t *offset)
50775 {
50776 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
50777diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
50778index 59659ce..6c860a0 100644
50779--- a/drivers/oprofile/oprofile_stats.c
50780+++ b/drivers/oprofile/oprofile_stats.c
50781@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
50782 cpu_buf->sample_invalid_eip = 0;
50783 }
50784
50785- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
50786- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
50787- atomic_set(&oprofile_stats.event_lost_overflow, 0);
50788- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
50789- atomic_set(&oprofile_stats.multiplex_counter, 0);
50790+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
50791+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
50792+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
50793+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
50794+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
50795 }
50796
50797
50798diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
50799index 1fc622b..8c48fc3 100644
50800--- a/drivers/oprofile/oprofile_stats.h
50801+++ b/drivers/oprofile/oprofile_stats.h
50802@@ -13,11 +13,11 @@
50803 #include <linux/atomic.h>
50804
50805 struct oprofile_stat_struct {
50806- atomic_t sample_lost_no_mm;
50807- atomic_t sample_lost_no_mapping;
50808- atomic_t bt_lost_no_mapping;
50809- atomic_t event_lost_overflow;
50810- atomic_t multiplex_counter;
50811+ atomic_unchecked_t sample_lost_no_mm;
50812+ atomic_unchecked_t sample_lost_no_mapping;
50813+ atomic_unchecked_t bt_lost_no_mapping;
50814+ atomic_unchecked_t event_lost_overflow;
50815+ atomic_unchecked_t multiplex_counter;
50816 };
50817
50818 extern struct oprofile_stat_struct oprofile_stats;
50819diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
50820index 3f49345..c750d0b 100644
50821--- a/drivers/oprofile/oprofilefs.c
50822+++ b/drivers/oprofile/oprofilefs.c
50823@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
50824
50825 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
50826 {
50827- atomic_t *val = file->private_data;
50828- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
50829+ atomic_unchecked_t *val = file->private_data;
50830+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
50831 }
50832
50833
50834@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
50835
50836
50837 int oprofilefs_create_ro_atomic(struct dentry *root,
50838- char const *name, atomic_t *val)
50839+ char const *name, atomic_unchecked_t *val)
50840 {
50841 return __oprofilefs_create_file(root, name,
50842 &atomic_ro_fops, 0444, val);
50843diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
50844index bdef916..88c7dee 100644
50845--- a/drivers/oprofile/timer_int.c
50846+++ b/drivers/oprofile/timer_int.c
50847@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
50848 return NOTIFY_OK;
50849 }
50850
50851-static struct notifier_block __refdata oprofile_cpu_notifier = {
50852+static struct notifier_block oprofile_cpu_notifier = {
50853 .notifier_call = oprofile_cpu_notify,
50854 };
50855
50856diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
50857index 3b47080..6cd05dd 100644
50858--- a/drivers/parport/procfs.c
50859+++ b/drivers/parport/procfs.c
50860@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
50861
50862 *ppos += len;
50863
50864- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
50865+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
50866 }
50867
50868 #ifdef CONFIG_PARPORT_1284
50869@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
50870
50871 *ppos += len;
50872
50873- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
50874+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
50875 }
50876 #endif /* IEEE1284.3 support. */
50877
50878diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
50879index ba46e58..90cfc24 100644
50880--- a/drivers/pci/host/pci-host-generic.c
50881+++ b/drivers/pci/host/pci-host-generic.c
50882@@ -26,9 +26,9 @@
50883 #include <linux/platform_device.h>
50884
50885 struct gen_pci_cfg_bus_ops {
50886+ struct pci_ops ops;
50887 u32 bus_shift;
50888- void __iomem *(*map_bus)(struct pci_bus *, unsigned int, int);
50889-};
50890+} __do_const;
50891
50892 struct gen_pci_cfg_windows {
50893 struct resource res;
50894@@ -56,8 +56,12 @@ static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
50895 }
50896
50897 static struct gen_pci_cfg_bus_ops gen_pci_cfg_cam_bus_ops = {
50898+ .ops = {
50899+ .map_bus = gen_pci_map_cfg_bus_cam,
50900+ .read = pci_generic_config_read,
50901+ .write = pci_generic_config_write,
50902+ },
50903 .bus_shift = 16,
50904- .map_bus = gen_pci_map_cfg_bus_cam,
50905 };
50906
50907 static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
50908@@ -72,13 +76,12 @@ static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
50909 }
50910
50911 static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = {
50912+ .ops = {
50913+ .map_bus = gen_pci_map_cfg_bus_ecam,
50914+ .read = pci_generic_config_read,
50915+ .write = pci_generic_config_write,
50916+ },
50917 .bus_shift = 20,
50918- .map_bus = gen_pci_map_cfg_bus_ecam,
50919-};
50920-
50921-static struct pci_ops gen_pci_ops = {
50922- .read = pci_generic_config_read,
50923- .write = pci_generic_config_write,
50924 };
50925
50926 static const struct of_device_id gen_pci_of_match[] = {
50927@@ -219,7 +222,6 @@ static int gen_pci_probe(struct platform_device *pdev)
50928 .private_data = (void **)&pci,
50929 .setup = gen_pci_setup,
50930 .map_irq = of_irq_parse_and_map_pci,
50931- .ops = &gen_pci_ops,
50932 };
50933
50934 if (!pci)
50935@@ -241,7 +243,7 @@ static int gen_pci_probe(struct platform_device *pdev)
50936
50937 of_id = of_match_node(gen_pci_of_match, np);
50938 pci->cfg.ops = of_id->data;
50939- gen_pci_ops.map_bus = pci->cfg.ops->map_bus;
50940+ hw.ops = &pci->cfg.ops->ops;
50941 pci->host.dev.parent = dev;
50942 INIT_LIST_HEAD(&pci->host.windows);
50943 INIT_LIST_HEAD(&pci->resources);
50944diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
50945index 6ca2399..68d866b 100644
50946--- a/drivers/pci/hotplug/acpiphp_ibm.c
50947+++ b/drivers/pci/hotplug/acpiphp_ibm.c
50948@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
50949 goto init_cleanup;
50950 }
50951
50952- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
50953+ pax_open_kernel();
50954+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
50955+ pax_close_kernel();
50956 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
50957
50958 return retval;
50959diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
50960index 66b7bbe..26bee78 100644
50961--- a/drivers/pci/hotplug/cpcihp_generic.c
50962+++ b/drivers/pci/hotplug/cpcihp_generic.c
50963@@ -73,7 +73,6 @@ static u16 port;
50964 static unsigned int enum_bit;
50965 static u8 enum_mask;
50966
50967-static struct cpci_hp_controller_ops generic_hpc_ops;
50968 static struct cpci_hp_controller generic_hpc;
50969
50970 static int __init validate_parameters(void)
50971@@ -139,6 +138,10 @@ static int query_enum(void)
50972 return ((value & enum_mask) == enum_mask);
50973 }
50974
50975+static struct cpci_hp_controller_ops generic_hpc_ops = {
50976+ .query_enum = query_enum,
50977+};
50978+
50979 static int __init cpcihp_generic_init(void)
50980 {
50981 int status;
50982@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
50983 pci_dev_put(dev);
50984
50985 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
50986- generic_hpc_ops.query_enum = query_enum;
50987 generic_hpc.ops = &generic_hpc_ops;
50988
50989 status = cpci_hp_register_controller(&generic_hpc);
50990diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
50991index 7ecf34e..effed62 100644
50992--- a/drivers/pci/hotplug/cpcihp_zt5550.c
50993+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
50994@@ -59,7 +59,6 @@
50995 /* local variables */
50996 static bool debug;
50997 static bool poll;
50998-static struct cpci_hp_controller_ops zt5550_hpc_ops;
50999 static struct cpci_hp_controller zt5550_hpc;
51000
51001 /* Primary cPCI bus bridge device */
51002@@ -204,6 +203,10 @@ static int zt5550_hc_disable_irq(void)
51003 return 0;
51004 }
51005
51006+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
51007+ .query_enum = zt5550_hc_query_enum,
51008+};
51009+
51010 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
51011 {
51012 int status;
51013@@ -215,16 +218,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
51014 dbg("returned from zt5550_hc_config");
51015
51016 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
51017- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
51018 zt5550_hpc.ops = &zt5550_hpc_ops;
51019 if (!poll) {
51020 zt5550_hpc.irq = hc_dev->irq;
51021 zt5550_hpc.irq_flags = IRQF_SHARED;
51022 zt5550_hpc.dev_id = hc_dev;
51023
51024- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
51025- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
51026- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
51027+ pax_open_kernel();
51028+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
51029+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
51030+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
51031+ pax_open_kernel();
51032 } else {
51033 info("using ENUM# polling mode");
51034 }
51035diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
51036index 1e08ff8c..3cd145f 100644
51037--- a/drivers/pci/hotplug/cpqphp_nvram.c
51038+++ b/drivers/pci/hotplug/cpqphp_nvram.c
51039@@ -425,8 +425,10 @@ static u32 store_HRT (void __iomem *rom_start)
51040
51041 void compaq_nvram_init (void __iomem *rom_start)
51042 {
51043+#ifndef CONFIG_PAX_KERNEXEC
51044 if (rom_start)
51045 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
51046+#endif
51047
51048 dbg("int15 entry = %p\n", compaq_int15_entry_point);
51049
51050diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
51051index 56d8486..f26113f 100644
51052--- a/drivers/pci/hotplug/pci_hotplug_core.c
51053+++ b/drivers/pci/hotplug/pci_hotplug_core.c
51054@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
51055 return -EINVAL;
51056 }
51057
51058- slot->ops->owner = owner;
51059- slot->ops->mod_name = mod_name;
51060+ pax_open_kernel();
51061+ *(struct module **)&slot->ops->owner = owner;
51062+ *(const char **)&slot->ops->mod_name = mod_name;
51063+ pax_close_kernel();
51064
51065 mutex_lock(&pci_hp_mutex);
51066 /*
51067diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
51068index 07aa722..84514b4 100644
51069--- a/drivers/pci/hotplug/pciehp_core.c
51070+++ b/drivers/pci/hotplug/pciehp_core.c
51071@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
51072 struct slot *slot = ctrl->slot;
51073 struct hotplug_slot *hotplug = NULL;
51074 struct hotplug_slot_info *info = NULL;
51075- struct hotplug_slot_ops *ops = NULL;
51076+ hotplug_slot_ops_no_const *ops = NULL;
51077 char name[SLOT_NAME_SIZE];
51078 int retval = -ENOMEM;
51079
51080diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
51081index c3e7dfc..cbd9625 100644
51082--- a/drivers/pci/msi.c
51083+++ b/drivers/pci/msi.c
51084@@ -513,8 +513,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
51085 {
51086 struct attribute **msi_attrs;
51087 struct attribute *msi_attr;
51088- struct device_attribute *msi_dev_attr;
51089- struct attribute_group *msi_irq_group;
51090+ device_attribute_no_const *msi_dev_attr;
51091+ attribute_group_no_const *msi_irq_group;
51092 const struct attribute_group **msi_irq_groups;
51093 struct msi_desc *entry;
51094 int ret = -ENOMEM;
51095@@ -573,7 +573,7 @@ error_attrs:
51096 count = 0;
51097 msi_attr = msi_attrs[count];
51098 while (msi_attr) {
51099- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
51100+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
51101 kfree(msi_attr->name);
51102 kfree(msi_dev_attr);
51103 ++count;
51104diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
51105index 312f23a..d21181c 100644
51106--- a/drivers/pci/pci-sysfs.c
51107+++ b/drivers/pci/pci-sysfs.c
51108@@ -1140,7 +1140,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
51109 {
51110 /* allocate attribute structure, piggyback attribute name */
51111 int name_len = write_combine ? 13 : 10;
51112- struct bin_attribute *res_attr;
51113+ bin_attribute_no_const *res_attr;
51114 int retval;
51115
51116 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
51117@@ -1317,7 +1317,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
51118 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
51119 {
51120 int retval;
51121- struct bin_attribute *attr;
51122+ bin_attribute_no_const *attr;
51123
51124 /* If the device has VPD, try to expose it in sysfs. */
51125 if (dev->vpd) {
51126@@ -1364,7 +1364,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
51127 {
51128 int retval;
51129 int rom_size = 0;
51130- struct bin_attribute *attr;
51131+ bin_attribute_no_const *attr;
51132
51133 if (!sysfs_initialized)
51134 return -EACCES;
51135diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
51136index 4091f82..7d98eef 100644
51137--- a/drivers/pci/pci.h
51138+++ b/drivers/pci/pci.h
51139@@ -99,7 +99,7 @@ struct pci_vpd_ops {
51140 struct pci_vpd {
51141 unsigned int len;
51142 const struct pci_vpd_ops *ops;
51143- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
51144+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
51145 };
51146
51147 int pci_vpd_pci22_init(struct pci_dev *dev);
51148diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
51149index 820740a..8b1c673 100644
51150--- a/drivers/pci/pcie/aspm.c
51151+++ b/drivers/pci/pcie/aspm.c
51152@@ -27,9 +27,9 @@
51153 #define MODULE_PARAM_PREFIX "pcie_aspm."
51154
51155 /* Note: those are not register definitions */
51156-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
51157-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
51158-#define ASPM_STATE_L1 (4) /* L1 state */
51159+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
51160+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
51161+#define ASPM_STATE_L1 (4U) /* L1 state */
51162 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
51163 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
51164
51165diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
51166index be35da2..ec16cdb 100644
51167--- a/drivers/pci/pcie/portdrv_pci.c
51168+++ b/drivers/pci/pcie/portdrv_pci.c
51169@@ -324,7 +324,7 @@ static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d)
51170 return 0;
51171 }
51172
51173-static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = {
51174+static const struct dmi_system_id __initconst pcie_portdrv_dmi_table[] = {
51175 /*
51176 * Boxes that should not use MSI for PCIe PME signaling.
51177 */
51178diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
51179index 8d2f400..c97cc91 100644
51180--- a/drivers/pci/probe.c
51181+++ b/drivers/pci/probe.c
51182@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
51183 u16 orig_cmd;
51184 struct pci_bus_region region, inverted_region;
51185
51186- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
51187+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
51188
51189 /* No printks while decoding is disabled! */
51190 if (!dev->mmio_always_on) {
51191diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
51192index 3f155e7..0f4b1f0 100644
51193--- a/drivers/pci/proc.c
51194+++ b/drivers/pci/proc.c
51195@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
51196 static int __init pci_proc_init(void)
51197 {
51198 struct pci_dev *dev = NULL;
51199+
51200+#ifdef CONFIG_GRKERNSEC_PROC_ADD
51201+#ifdef CONFIG_GRKERNSEC_PROC_USER
51202+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
51203+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51204+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
51205+#endif
51206+#else
51207 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
51208+#endif
51209 proc_create("devices", 0, proc_bus_pci_dir,
51210 &proc_bus_pci_dev_operations);
51211 proc_initialized = 1;
51212diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c
51213index 3474920..acc9581 100644
51214--- a/drivers/platform/chrome/chromeos_pstore.c
51215+++ b/drivers/platform/chrome/chromeos_pstore.c
51216@@ -13,7 +13,7 @@
51217 #include <linux/platform_device.h>
51218 #include <linux/pstore_ram.h>
51219
51220-static struct dmi_system_id chromeos_pstore_dmi_table[] __initdata = {
51221+static const struct dmi_system_id chromeos_pstore_dmi_table[] __initconst = {
51222 {
51223 /*
51224 * Today all Chromebooks/boxes ship with Google_* as version and
51225diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
51226index 1e1e594..8fe59c5 100644
51227--- a/drivers/platform/x86/alienware-wmi.c
51228+++ b/drivers/platform/x86/alienware-wmi.c
51229@@ -150,7 +150,7 @@ struct wmax_led_args {
51230 } __packed;
51231
51232 static struct platform_device *platform_device;
51233-static struct device_attribute *zone_dev_attrs;
51234+static device_attribute_no_const *zone_dev_attrs;
51235 static struct attribute **zone_attrs;
51236 static struct platform_zone *zone_data;
51237
51238@@ -160,7 +160,7 @@ static struct platform_driver platform_driver = {
51239 }
51240 };
51241
51242-static struct attribute_group zone_attribute_group = {
51243+static attribute_group_no_const zone_attribute_group = {
51244 .name = "rgb_zones",
51245 };
51246
51247diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
51248index 7543a56..367ca8ed 100644
51249--- a/drivers/platform/x86/asus-wmi.c
51250+++ b/drivers/platform/x86/asus-wmi.c
51251@@ -1589,6 +1589,10 @@ static int show_dsts(struct seq_file *m, void *data)
51252 int err;
51253 u32 retval = -1;
51254
51255+#ifdef CONFIG_GRKERNSEC_KMEM
51256+ return -EPERM;
51257+#endif
51258+
51259 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
51260
51261 if (err < 0)
51262@@ -1605,6 +1609,10 @@ static int show_devs(struct seq_file *m, void *data)
51263 int err;
51264 u32 retval = -1;
51265
51266+#ifdef CONFIG_GRKERNSEC_KMEM
51267+ return -EPERM;
51268+#endif
51269+
51270 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
51271 &retval);
51272
51273@@ -1629,6 +1637,10 @@ static int show_call(struct seq_file *m, void *data)
51274 union acpi_object *obj;
51275 acpi_status status;
51276
51277+#ifdef CONFIG_GRKERNSEC_KMEM
51278+ return -EPERM;
51279+#endif
51280+
51281 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
51282 1, asus->debug.method_id,
51283 &input, &output);
51284diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
51285index bceb30b..bf063d4 100644
51286--- a/drivers/platform/x86/compal-laptop.c
51287+++ b/drivers/platform/x86/compal-laptop.c
51288@@ -766,7 +766,7 @@ static int dmi_check_cb_extra(const struct dmi_system_id *id)
51289 return 1;
51290 }
51291
51292-static struct dmi_system_id __initdata compal_dmi_table[] = {
51293+static const struct dmi_system_id __initconst compal_dmi_table[] = {
51294 {
51295 .ident = "FL90/IFL90",
51296 .matches = {
51297diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
51298index 458e6c9..089aee7 100644
51299--- a/drivers/platform/x86/hdaps.c
51300+++ b/drivers/platform/x86/hdaps.c
51301@@ -514,7 +514,7 @@ static int __init hdaps_dmi_match_invert(const struct dmi_system_id *id)
51302 "ThinkPad T42p", so the order of the entries matters.
51303 If your ThinkPad is not recognized, please update to latest
51304 BIOS. This is especially the case for some R52 ThinkPads. */
51305-static struct dmi_system_id __initdata hdaps_whitelist[] = {
51306+static const struct dmi_system_id __initconst hdaps_whitelist[] = {
51307 HDAPS_DMI_MATCH_INVERT("IBM", "ThinkPad R50p", HDAPS_BOTH_AXES),
51308 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R50"),
51309 HDAPS_DMI_MATCH_NORMAL("IBM", "ThinkPad R51"),
51310diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
51311index 97c2be1..2ee50ce 100644
51312--- a/drivers/platform/x86/ibm_rtl.c
51313+++ b/drivers/platform/x86/ibm_rtl.c
51314@@ -227,7 +227,7 @@ static void rtl_teardown_sysfs(void) {
51315 }
51316
51317
51318-static struct dmi_system_id __initdata ibm_rtl_dmi_table[] = {
51319+static const struct dmi_system_id __initconst ibm_rtl_dmi_table[] = {
51320 { \
51321 .matches = { \
51322 DMI_MATCH(DMI_SYS_VENDOR, "IBM"), \
51323diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
51324index a4a4258..a58a04c 100644
51325--- a/drivers/platform/x86/intel_oaktrail.c
51326+++ b/drivers/platform/x86/intel_oaktrail.c
51327@@ -298,7 +298,7 @@ static int dmi_check_cb(const struct dmi_system_id *id)
51328 return 0;
51329 }
51330
51331-static struct dmi_system_id __initdata oaktrail_dmi_table[] = {
51332+static const struct dmi_system_id __initconst oaktrail_dmi_table[] = {
51333 {
51334 .ident = "OakTrail platform",
51335 .matches = {
51336diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
51337index 0859877..59d596d 100644
51338--- a/drivers/platform/x86/msi-laptop.c
51339+++ b/drivers/platform/x86/msi-laptop.c
51340@@ -604,7 +604,7 @@ static int dmi_check_cb(const struct dmi_system_id *dmi)
51341 return 1;
51342 }
51343
51344-static struct dmi_system_id __initdata msi_dmi_table[] = {
51345+static const struct dmi_system_id __initconst msi_dmi_table[] = {
51346 {
51347 .ident = "MSI S270",
51348 .matches = {
51349@@ -999,12 +999,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
51350
51351 if (!quirks->ec_read_only) {
51352 /* allow userland write sysfs file */
51353- dev_attr_bluetooth.store = store_bluetooth;
51354- dev_attr_wlan.store = store_wlan;
51355- dev_attr_threeg.store = store_threeg;
51356- dev_attr_bluetooth.attr.mode |= S_IWUSR;
51357- dev_attr_wlan.attr.mode |= S_IWUSR;
51358- dev_attr_threeg.attr.mode |= S_IWUSR;
51359+ pax_open_kernel();
51360+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
51361+ *(void **)&dev_attr_wlan.store = store_wlan;
51362+ *(void **)&dev_attr_threeg.store = store_threeg;
51363+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
51364+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
51365+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
51366+ pax_close_kernel();
51367 }
51368
51369 /* disable hardware control by fn key */
51370diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
51371index 6d2bac0..ec2b029 100644
51372--- a/drivers/platform/x86/msi-wmi.c
51373+++ b/drivers/platform/x86/msi-wmi.c
51374@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
51375 static void msi_wmi_notify(u32 value, void *context)
51376 {
51377 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
51378- static struct key_entry *key;
51379+ struct key_entry *key;
51380 union acpi_object *obj;
51381 acpi_status status;
51382
51383diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
51384index 9e701b2..c68a7b5 100644
51385--- a/drivers/platform/x86/samsung-laptop.c
51386+++ b/drivers/platform/x86/samsung-laptop.c
51387@@ -1567,7 +1567,7 @@ static int __init samsung_dmi_matched(const struct dmi_system_id *d)
51388 return 0;
51389 }
51390
51391-static struct dmi_system_id __initdata samsung_dmi_table[] = {
51392+static const struct dmi_system_id __initconst samsung_dmi_table[] = {
51393 {
51394 .matches = {
51395 DMI_MATCH(DMI_SYS_VENDOR,
51396diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
51397index e6aac72..e11ff24 100644
51398--- a/drivers/platform/x86/samsung-q10.c
51399+++ b/drivers/platform/x86/samsung-q10.c
51400@@ -95,7 +95,7 @@ static int __init dmi_check_callback(const struct dmi_system_id *id)
51401 return 1;
51402 }
51403
51404-static struct dmi_system_id __initdata samsungq10_dmi_table[] = {
51405+static const struct dmi_system_id __initconst samsungq10_dmi_table[] = {
51406 {
51407 .ident = "Samsung Q10",
51408 .matches = {
51409diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
51410index e51c1e7..71bb385 100644
51411--- a/drivers/platform/x86/sony-laptop.c
51412+++ b/drivers/platform/x86/sony-laptop.c
51413@@ -2526,7 +2526,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
51414 }
51415
51416 /* High speed charging function */
51417-static struct device_attribute *hsc_handle;
51418+static device_attribute_no_const *hsc_handle;
51419
51420 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
51421 struct device_attribute *attr,
51422@@ -2600,7 +2600,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
51423 }
51424
51425 /* low battery function */
51426-static struct device_attribute *lowbatt_handle;
51427+static device_attribute_no_const *lowbatt_handle;
51428
51429 static ssize_t sony_nc_lowbatt_store(struct device *dev,
51430 struct device_attribute *attr,
51431@@ -2666,7 +2666,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
51432 }
51433
51434 /* fan speed function */
51435-static struct device_attribute *fan_handle, *hsf_handle;
51436+static device_attribute_no_const *fan_handle, *hsf_handle;
51437
51438 static ssize_t sony_nc_hsfan_store(struct device *dev,
51439 struct device_attribute *attr,
51440@@ -2773,7 +2773,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
51441 }
51442
51443 /* USB charge function */
51444-static struct device_attribute *uc_handle;
51445+static device_attribute_no_const *uc_handle;
51446
51447 static ssize_t sony_nc_usb_charge_store(struct device *dev,
51448 struct device_attribute *attr,
51449@@ -2847,7 +2847,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
51450 }
51451
51452 /* Panel ID function */
51453-static struct device_attribute *panel_handle;
51454+static device_attribute_no_const *panel_handle;
51455
51456 static ssize_t sony_nc_panelid_show(struct device *dev,
51457 struct device_attribute *attr, char *buffer)
51458@@ -2894,7 +2894,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
51459 }
51460
51461 /* smart connect function */
51462-static struct device_attribute *sc_handle;
51463+static device_attribute_no_const *sc_handle;
51464
51465 static ssize_t sony_nc_smart_conn_store(struct device *dev,
51466 struct device_attribute *attr,
51467@@ -4854,7 +4854,7 @@ static struct acpi_driver sony_pic_driver = {
51468 .drv.pm = &sony_pic_pm,
51469 };
51470
51471-static struct dmi_system_id __initdata sonypi_dmi_table[] = {
51472+static const struct dmi_system_id __initconst sonypi_dmi_table[] = {
51473 {
51474 .ident = "Sony Vaio",
51475 .matches = {
51476diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
51477index 3b8ceee..e18652c 100644
51478--- a/drivers/platform/x86/thinkpad_acpi.c
51479+++ b/drivers/platform/x86/thinkpad_acpi.c
51480@@ -2093,7 +2093,7 @@ static int hotkey_mask_get(void)
51481 return 0;
51482 }
51483
51484-void static hotkey_mask_warn_incomplete_mask(void)
51485+static void hotkey_mask_warn_incomplete_mask(void)
51486 {
51487 /* log only what the user can fix... */
51488 const u32 wantedmask = hotkey_driver_mask &
51489@@ -2437,10 +2437,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
51490 && !tp_features.bright_unkfw)
51491 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
51492 }
51493+}
51494
51495 #undef TPACPI_COMPARE_KEY
51496 #undef TPACPI_MAY_SEND_KEY
51497-}
51498
51499 /*
51500 * Polling driver
51501diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
51502index 438d4c7..ca8a2fb 100644
51503--- a/drivers/pnp/pnpbios/bioscalls.c
51504+++ b/drivers/pnp/pnpbios/bioscalls.c
51505@@ -59,7 +59,7 @@ do { \
51506 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
51507 } while(0)
51508
51509-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
51510+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
51511 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
51512
51513 /*
51514@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51515
51516 cpu = get_cpu();
51517 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
51518+
51519+ pax_open_kernel();
51520 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
51521+ pax_close_kernel();
51522
51523 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
51524 spin_lock_irqsave(&pnp_bios_lock, flags);
51525@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
51526 :"memory");
51527 spin_unlock_irqrestore(&pnp_bios_lock, flags);
51528
51529+ pax_open_kernel();
51530 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
51531+ pax_close_kernel();
51532+
51533 put_cpu();
51534
51535 /* If we get here and this is set then the PnP BIOS faulted on us. */
51536@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
51537 return status;
51538 }
51539
51540-void pnpbios_calls_init(union pnp_bios_install_struct *header)
51541+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
51542 {
51543 int i;
51544
51545@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51546 pnp_bios_callpoint.offset = header->fields.pm16offset;
51547 pnp_bios_callpoint.segment = PNP_CS16;
51548
51549+ pax_open_kernel();
51550+
51551 for_each_possible_cpu(i) {
51552 struct desc_struct *gdt = get_cpu_gdt_table(i);
51553 if (!gdt)
51554@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
51555 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
51556 (unsigned long)__va(header->fields.pm16dseg));
51557 }
51558+
51559+ pax_close_kernel();
51560 }
51561diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
51562index facd43b..b291260 100644
51563--- a/drivers/pnp/pnpbios/core.c
51564+++ b/drivers/pnp/pnpbios/core.c
51565@@ -494,7 +494,7 @@ static int __init exploding_pnp_bios(const struct dmi_system_id *d)
51566 return 0;
51567 }
51568
51569-static struct dmi_system_id pnpbios_dmi_table[] __initdata = {
51570+static const struct dmi_system_id pnpbios_dmi_table[] __initconst = {
51571 { /* PnPBIOS GPF on boot */
51572 .callback = exploding_pnp_bios,
51573 .ident = "Higraded P14H",
51574diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
51575index 0c52e2a..3421ab7 100644
51576--- a/drivers/power/pda_power.c
51577+++ b/drivers/power/pda_power.c
51578@@ -37,7 +37,11 @@ static int polling;
51579
51580 #if IS_ENABLED(CONFIG_USB_PHY)
51581 static struct usb_phy *transceiver;
51582-static struct notifier_block otg_nb;
51583+static int otg_handle_notification(struct notifier_block *nb,
51584+ unsigned long event, void *unused);
51585+static struct notifier_block otg_nb = {
51586+ .notifier_call = otg_handle_notification
51587+};
51588 #endif
51589
51590 static struct regulator *ac_draw;
51591@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
51592
51593 #if IS_ENABLED(CONFIG_USB_PHY)
51594 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
51595- otg_nb.notifier_call = otg_handle_notification;
51596 ret = usb_register_notifier(transceiver, &otg_nb);
51597 if (ret) {
51598 dev_err(dev, "failure to register otg notifier\n");
51599diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
51600index cc439fd..8fa30df 100644
51601--- a/drivers/power/power_supply.h
51602+++ b/drivers/power/power_supply.h
51603@@ -16,12 +16,12 @@ struct power_supply;
51604
51605 #ifdef CONFIG_SYSFS
51606
51607-extern void power_supply_init_attrs(struct device_type *dev_type);
51608+extern void power_supply_init_attrs(void);
51609 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
51610
51611 #else
51612
51613-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
51614+static inline void power_supply_init_attrs(void) {}
51615 #define power_supply_uevent NULL
51616
51617 #endif /* CONFIG_SYSFS */
51618diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
51619index 694e8cd..9f03483 100644
51620--- a/drivers/power/power_supply_core.c
51621+++ b/drivers/power/power_supply_core.c
51622@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
51623 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
51624 EXPORT_SYMBOL_GPL(power_supply_notifier);
51625
51626-static struct device_type power_supply_dev_type;
51627+extern const struct attribute_group *power_supply_attr_groups[];
51628+static struct device_type power_supply_dev_type = {
51629+ .groups = power_supply_attr_groups,
51630+};
51631
51632 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
51633 struct power_supply *supply)
51634@@ -637,7 +640,7 @@ static int __init power_supply_class_init(void)
51635 return PTR_ERR(power_supply_class);
51636
51637 power_supply_class->dev_uevent = power_supply_uevent;
51638- power_supply_init_attrs(&power_supply_dev_type);
51639+ power_supply_init_attrs();
51640
51641 return 0;
51642 }
51643diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
51644index 62653f5..d0bb485 100644
51645--- a/drivers/power/power_supply_sysfs.c
51646+++ b/drivers/power/power_supply_sysfs.c
51647@@ -238,17 +238,15 @@ static struct attribute_group power_supply_attr_group = {
51648 .is_visible = power_supply_attr_is_visible,
51649 };
51650
51651-static const struct attribute_group *power_supply_attr_groups[] = {
51652+const struct attribute_group *power_supply_attr_groups[] = {
51653 &power_supply_attr_group,
51654 NULL,
51655 };
51656
51657-void power_supply_init_attrs(struct device_type *dev_type)
51658+void power_supply_init_attrs(void)
51659 {
51660 int i;
51661
51662- dev_type->groups = power_supply_attr_groups;
51663-
51664 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
51665 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
51666 }
51667diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
51668index 84419af..268ede8 100644
51669--- a/drivers/powercap/powercap_sys.c
51670+++ b/drivers/powercap/powercap_sys.c
51671@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
51672 struct device_attribute name_attr;
51673 };
51674
51675+static ssize_t show_constraint_name(struct device *dev,
51676+ struct device_attribute *dev_attr,
51677+ char *buf);
51678+
51679 static struct powercap_constraint_attr
51680- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
51681+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
51682+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
51683+ .power_limit_attr = {
51684+ .attr = {
51685+ .name = NULL,
51686+ .mode = S_IWUSR | S_IRUGO
51687+ },
51688+ .show = show_constraint_power_limit_uw,
51689+ .store = store_constraint_power_limit_uw
51690+ },
51691+
51692+ .time_window_attr = {
51693+ .attr = {
51694+ .name = NULL,
51695+ .mode = S_IWUSR | S_IRUGO
51696+ },
51697+ .show = show_constraint_time_window_us,
51698+ .store = store_constraint_time_window_us
51699+ },
51700+
51701+ .max_power_attr = {
51702+ .attr = {
51703+ .name = NULL,
51704+ .mode = S_IRUGO
51705+ },
51706+ .show = show_constraint_max_power_uw,
51707+ .store = NULL
51708+ },
51709+
51710+ .min_power_attr = {
51711+ .attr = {
51712+ .name = NULL,
51713+ .mode = S_IRUGO
51714+ },
51715+ .show = show_constraint_min_power_uw,
51716+ .store = NULL
51717+ },
51718+
51719+ .max_time_window_attr = {
51720+ .attr = {
51721+ .name = NULL,
51722+ .mode = S_IRUGO
51723+ },
51724+ .show = show_constraint_max_time_window_us,
51725+ .store = NULL
51726+ },
51727+
51728+ .min_time_window_attr = {
51729+ .attr = {
51730+ .name = NULL,
51731+ .mode = S_IRUGO
51732+ },
51733+ .show = show_constraint_min_time_window_us,
51734+ .store = NULL
51735+ },
51736+
51737+ .name_attr = {
51738+ .attr = {
51739+ .name = NULL,
51740+ .mode = S_IRUGO
51741+ },
51742+ .show = show_constraint_name,
51743+ .store = NULL
51744+ }
51745+ }
51746+};
51747
51748 /* A list of powercap control_types */
51749 static LIST_HEAD(powercap_cntrl_list);
51750@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
51751 }
51752
51753 static int create_constraint_attribute(int id, const char *name,
51754- int mode,
51755- struct device_attribute *dev_attr,
51756- ssize_t (*show)(struct device *,
51757- struct device_attribute *, char *),
51758- ssize_t (*store)(struct device *,
51759- struct device_attribute *,
51760- const char *, size_t)
51761- )
51762+ struct device_attribute *dev_attr)
51763 {
51764+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
51765
51766- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
51767- id, name);
51768- if (!dev_attr->attr.name)
51769+ if (!name)
51770 return -ENOMEM;
51771- dev_attr->attr.mode = mode;
51772- dev_attr->show = show;
51773- dev_attr->store = store;
51774+
51775+ pax_open_kernel();
51776+ *(const char **)&dev_attr->attr.name = name;
51777+ pax_close_kernel();
51778
51779 return 0;
51780 }
51781@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
51782
51783 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
51784 ret = create_constraint_attribute(i, "power_limit_uw",
51785- S_IWUSR | S_IRUGO,
51786- &constraint_attrs[i].power_limit_attr,
51787- show_constraint_power_limit_uw,
51788- store_constraint_power_limit_uw);
51789+ &constraint_attrs[i].power_limit_attr);
51790 if (ret)
51791 goto err_alloc;
51792 ret = create_constraint_attribute(i, "time_window_us",
51793- S_IWUSR | S_IRUGO,
51794- &constraint_attrs[i].time_window_attr,
51795- show_constraint_time_window_us,
51796- store_constraint_time_window_us);
51797+ &constraint_attrs[i].time_window_attr);
51798 if (ret)
51799 goto err_alloc;
51800- ret = create_constraint_attribute(i, "name", S_IRUGO,
51801- &constraint_attrs[i].name_attr,
51802- show_constraint_name,
51803- NULL);
51804+ ret = create_constraint_attribute(i, "name",
51805+ &constraint_attrs[i].name_attr);
51806 if (ret)
51807 goto err_alloc;
51808- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
51809- &constraint_attrs[i].max_power_attr,
51810- show_constraint_max_power_uw,
51811- NULL);
51812+ ret = create_constraint_attribute(i, "max_power_uw",
51813+ &constraint_attrs[i].max_power_attr);
51814 if (ret)
51815 goto err_alloc;
51816- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
51817- &constraint_attrs[i].min_power_attr,
51818- show_constraint_min_power_uw,
51819- NULL);
51820+ ret = create_constraint_attribute(i, "min_power_uw",
51821+ &constraint_attrs[i].min_power_attr);
51822 if (ret)
51823 goto err_alloc;
51824 ret = create_constraint_attribute(i, "max_time_window_us",
51825- S_IRUGO,
51826- &constraint_attrs[i].max_time_window_attr,
51827- show_constraint_max_time_window_us,
51828- NULL);
51829+ &constraint_attrs[i].max_time_window_attr);
51830 if (ret)
51831 goto err_alloc;
51832 ret = create_constraint_attribute(i, "min_time_window_us",
51833- S_IRUGO,
51834- &constraint_attrs[i].min_time_window_attr,
51835- show_constraint_min_time_window_us,
51836- NULL);
51837+ &constraint_attrs[i].min_time_window_attr);
51838 if (ret)
51839 goto err_alloc;
51840
51841@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
51842 power_zone->zone_dev_attrs[count++] =
51843 &dev_attr_max_energy_range_uj.attr;
51844 if (power_zone->ops->get_energy_uj) {
51845+ pax_open_kernel();
51846 if (power_zone->ops->reset_energy_uj)
51847- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
51848+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
51849 else
51850- dev_attr_energy_uj.attr.mode = S_IRUGO;
51851+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
51852+ pax_close_kernel();
51853 power_zone->zone_dev_attrs[count++] =
51854 &dev_attr_energy_uj.attr;
51855 }
51856diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
51857index 9c5d414..c7900ce 100644
51858--- a/drivers/ptp/ptp_private.h
51859+++ b/drivers/ptp/ptp_private.h
51860@@ -51,7 +51,7 @@ struct ptp_clock {
51861 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
51862 wait_queue_head_t tsev_wq;
51863 int defunct; /* tells readers to go away when clock is being removed */
51864- struct device_attribute *pin_dev_attr;
51865+ device_attribute_no_const *pin_dev_attr;
51866 struct attribute **pin_attr;
51867 struct attribute_group pin_attr_group;
51868 };
51869diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
51870index 302e626..12579af 100644
51871--- a/drivers/ptp/ptp_sysfs.c
51872+++ b/drivers/ptp/ptp_sysfs.c
51873@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
51874 goto no_pin_attr;
51875
51876 for (i = 0; i < n_pins; i++) {
51877- struct device_attribute *da = &ptp->pin_dev_attr[i];
51878+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
51879 sysfs_attr_init(&da->attr);
51880 da->attr.name = info->pin_config[i].name;
51881 da->attr.mode = 0644;
51882diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
51883index a4a8a6d..a3456f4 100644
51884--- a/drivers/regulator/core.c
51885+++ b/drivers/regulator/core.c
51886@@ -3529,7 +3529,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
51887 const struct regulation_constraints *constraints = NULL;
51888 const struct regulator_init_data *init_data;
51889 struct regulator_config *config = NULL;
51890- static atomic_t regulator_no = ATOMIC_INIT(-1);
51891+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(-1);
51892 struct regulator_dev *rdev;
51893 struct device *dev;
51894 int ret, i;
51895@@ -3613,7 +3613,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
51896 rdev->dev.class = &regulator_class;
51897 rdev->dev.parent = dev;
51898 dev_set_name(&rdev->dev, "regulator.%lu",
51899- (unsigned long) atomic_inc_return(&regulator_no));
51900+ (unsigned long) atomic_inc_return_unchecked(&regulator_no));
51901 ret = device_register(&rdev->dev);
51902 if (ret != 0) {
51903 put_device(&rdev->dev);
51904diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
51905index 7eee2ca..4024513 100644
51906--- a/drivers/regulator/max8660.c
51907+++ b/drivers/regulator/max8660.c
51908@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
51909 max8660->shadow_regs[MAX8660_OVER1] = 5;
51910 } else {
51911 /* Otherwise devices can be toggled via software */
51912- max8660_dcdc_ops.enable = max8660_dcdc_enable;
51913- max8660_dcdc_ops.disable = max8660_dcdc_disable;
51914+ pax_open_kernel();
51915+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
51916+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
51917+ pax_close_kernel();
51918 }
51919
51920 /*
51921diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
51922index c3d55c2..0dddfe6 100644
51923--- a/drivers/regulator/max8973-regulator.c
51924+++ b/drivers/regulator/max8973-regulator.c
51925@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
51926 if (!pdata || !pdata->enable_ext_control) {
51927 max->desc.enable_reg = MAX8973_VOUT;
51928 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
51929- max->ops.enable = regulator_enable_regmap;
51930- max->ops.disable = regulator_disable_regmap;
51931- max->ops.is_enabled = regulator_is_enabled_regmap;
51932+ pax_open_kernel();
51933+ *(void **)&max->ops.enable = regulator_enable_regmap;
51934+ *(void **)&max->ops.disable = regulator_disable_regmap;
51935+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
51936+ pax_close_kernel();
51937 }
51938
51939 if (pdata) {
51940diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
51941index 0d17c92..a29f627 100644
51942--- a/drivers/regulator/mc13892-regulator.c
51943+++ b/drivers/regulator/mc13892-regulator.c
51944@@ -584,10 +584,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
51945 mc13xxx_unlock(mc13892);
51946
51947 /* update mc13892_vcam ops */
51948- memcpy(&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
51949+ pax_open_kernel();
51950+ memcpy((void *)&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
51951 sizeof(struct regulator_ops));
51952- mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
51953- mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
51954+ *(void **)&mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
51955+ *(void **)&mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
51956+ pax_close_kernel();
51957 mc13892_regulators[MC13892_VCAM].desc.ops = &mc13892_vcam_ops;
51958
51959 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
51960diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
51961index 5b2e761..c8c8a4a 100644
51962--- a/drivers/rtc/rtc-cmos.c
51963+++ b/drivers/rtc/rtc-cmos.c
51964@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
51965 hpet_rtc_timer_init();
51966
51967 /* export at least the first block of NVRAM */
51968- nvram.size = address_space - NVRAM_OFFSET;
51969+ pax_open_kernel();
51970+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
51971+ pax_close_kernel();
51972 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
51973 if (retval < 0) {
51974 dev_dbg(dev, "can't create nvram file? %d\n", retval);
51975diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
51976index 799c34b..8e9786a 100644
51977--- a/drivers/rtc/rtc-dev.c
51978+++ b/drivers/rtc/rtc-dev.c
51979@@ -16,6 +16,7 @@
51980 #include <linux/module.h>
51981 #include <linux/rtc.h>
51982 #include <linux/sched.h>
51983+#include <linux/grsecurity.h>
51984 #include "rtc-core.h"
51985
51986 static dev_t rtc_devt;
51987@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
51988 if (copy_from_user(&tm, uarg, sizeof(tm)))
51989 return -EFAULT;
51990
51991+ gr_log_timechange();
51992+
51993 return rtc_set_time(rtc, &tm);
51994
51995 case RTC_PIE_ON:
51996diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
51997index 4ffabb3..1f87fca 100644
51998--- a/drivers/rtc/rtc-ds1307.c
51999+++ b/drivers/rtc/rtc-ds1307.c
52000@@ -107,7 +107,7 @@ struct ds1307 {
52001 u8 offset; /* register's offset */
52002 u8 regs[11];
52003 u16 nvram_offset;
52004- struct bin_attribute *nvram;
52005+ bin_attribute_no_const *nvram;
52006 enum ds_type type;
52007 unsigned long flags;
52008 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
52009diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
52010index 90abb5b..e0bf6dd 100644
52011--- a/drivers/rtc/rtc-m48t59.c
52012+++ b/drivers/rtc/rtc-m48t59.c
52013@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
52014 if (IS_ERR(m48t59->rtc))
52015 return PTR_ERR(m48t59->rtc);
52016
52017- m48t59_nvram_attr.size = pdata->offset;
52018+ pax_open_kernel();
52019+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
52020+ pax_close_kernel();
52021
52022 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
52023 if (ret)
52024diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
52025index e693af6..2e525b6 100644
52026--- a/drivers/scsi/bfa/bfa_fcpim.h
52027+++ b/drivers/scsi/bfa/bfa_fcpim.h
52028@@ -36,7 +36,7 @@ struct bfa_iotag_s {
52029
52030 struct bfa_itn_s {
52031 bfa_isr_func_t isr;
52032-};
52033+} __no_const;
52034
52035 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
52036 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
52037diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
52038index 0f19455..ef7adb5 100644
52039--- a/drivers/scsi/bfa/bfa_fcs.c
52040+++ b/drivers/scsi/bfa/bfa_fcs.c
52041@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
52042 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
52043
52044 static struct bfa_fcs_mod_s fcs_modules[] = {
52045- { bfa_fcs_port_attach, NULL, NULL },
52046- { bfa_fcs_uf_attach, NULL, NULL },
52047- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
52048- bfa_fcs_fabric_modexit },
52049+ {
52050+ .attach = bfa_fcs_port_attach,
52051+ .modinit = NULL,
52052+ .modexit = NULL
52053+ },
52054+ {
52055+ .attach = bfa_fcs_uf_attach,
52056+ .modinit = NULL,
52057+ .modexit = NULL
52058+ },
52059+ {
52060+ .attach = bfa_fcs_fabric_attach,
52061+ .modinit = bfa_fcs_fabric_modinit,
52062+ .modexit = bfa_fcs_fabric_modexit
52063+ },
52064 };
52065
52066 /*
52067diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
52068index ff75ef8..2dfe00a 100644
52069--- a/drivers/scsi/bfa/bfa_fcs_lport.c
52070+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
52071@@ -89,15 +89,26 @@ static struct {
52072 void (*offline) (struct bfa_fcs_lport_s *port);
52073 } __port_action[] = {
52074 {
52075- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
52076- bfa_fcs_lport_unknown_offline}, {
52077- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
52078- bfa_fcs_lport_fab_offline}, {
52079- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
52080- bfa_fcs_lport_n2n_offline}, {
52081- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
52082- bfa_fcs_lport_loop_offline},
52083- };
52084+ .init = bfa_fcs_lport_unknown_init,
52085+ .online = bfa_fcs_lport_unknown_online,
52086+ .offline = bfa_fcs_lport_unknown_offline
52087+ },
52088+ {
52089+ .init = bfa_fcs_lport_fab_init,
52090+ .online = bfa_fcs_lport_fab_online,
52091+ .offline = bfa_fcs_lport_fab_offline
52092+ },
52093+ {
52094+ .init = bfa_fcs_lport_n2n_init,
52095+ .online = bfa_fcs_lport_n2n_online,
52096+ .offline = bfa_fcs_lport_n2n_offline
52097+ },
52098+ {
52099+ .init = bfa_fcs_lport_loop_init,
52100+ .online = bfa_fcs_lport_loop_online,
52101+ .offline = bfa_fcs_lport_loop_offline
52102+ },
52103+};
52104
52105 /*
52106 * fcs_port_sm FCS logical port state machine
52107diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
52108index a38aafa0..fe8f03b 100644
52109--- a/drivers/scsi/bfa/bfa_ioc.h
52110+++ b/drivers/scsi/bfa/bfa_ioc.h
52111@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
52112 bfa_ioc_disable_cbfn_t disable_cbfn;
52113 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
52114 bfa_ioc_reset_cbfn_t reset_cbfn;
52115-};
52116+} __no_const;
52117
52118 /*
52119 * IOC event notification mechanism.
52120@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
52121 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
52122 enum bfi_ioc_state fwstate);
52123 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
52124-};
52125+} __no_const;
52126
52127 /*
52128 * Queue element to wait for room in request queue. FIFO order is
52129diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
52130index a14c784..6de6790 100644
52131--- a/drivers/scsi/bfa/bfa_modules.h
52132+++ b/drivers/scsi/bfa/bfa_modules.h
52133@@ -78,12 +78,12 @@ enum {
52134 \
52135 extern struct bfa_module_s hal_mod_ ## __mod; \
52136 struct bfa_module_s hal_mod_ ## __mod = { \
52137- bfa_ ## __mod ## _meminfo, \
52138- bfa_ ## __mod ## _attach, \
52139- bfa_ ## __mod ## _detach, \
52140- bfa_ ## __mod ## _start, \
52141- bfa_ ## __mod ## _stop, \
52142- bfa_ ## __mod ## _iocdisable, \
52143+ .meminfo = bfa_ ## __mod ## _meminfo, \
52144+ .attach = bfa_ ## __mod ## _attach, \
52145+ .detach = bfa_ ## __mod ## _detach, \
52146+ .start = bfa_ ## __mod ## _start, \
52147+ .stop = bfa_ ## __mod ## _stop, \
52148+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
52149 }
52150
52151 #define BFA_CACHELINE_SZ (256)
52152diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
52153index 045c4e1..13de803 100644
52154--- a/drivers/scsi/fcoe/fcoe_sysfs.c
52155+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
52156@@ -33,8 +33,8 @@
52157 */
52158 #include "libfcoe.h"
52159
52160-static atomic_t ctlr_num;
52161-static atomic_t fcf_num;
52162+static atomic_unchecked_t ctlr_num;
52163+static atomic_unchecked_t fcf_num;
52164
52165 /*
52166 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
52167@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
52168 if (!ctlr)
52169 goto out;
52170
52171- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
52172+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
52173 ctlr->f = f;
52174 ctlr->mode = FIP_CONN_TYPE_FABRIC;
52175 INIT_LIST_HEAD(&ctlr->fcfs);
52176@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
52177 fcf->dev.parent = &ctlr->dev;
52178 fcf->dev.bus = &fcoe_bus_type;
52179 fcf->dev.type = &fcoe_fcf_device_type;
52180- fcf->id = atomic_inc_return(&fcf_num) - 1;
52181+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
52182 fcf->state = FCOE_FCF_STATE_UNKNOWN;
52183
52184 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
52185@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
52186 {
52187 int error;
52188
52189- atomic_set(&ctlr_num, 0);
52190- atomic_set(&fcf_num, 0);
52191+ atomic_set_unchecked(&ctlr_num, 0);
52192+ atomic_set_unchecked(&fcf_num, 0);
52193
52194 error = bus_register(&fcoe_bus_type);
52195 if (error)
52196diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
52197index 8bb173e..20236b4 100644
52198--- a/drivers/scsi/hosts.c
52199+++ b/drivers/scsi/hosts.c
52200@@ -42,7 +42,7 @@
52201 #include "scsi_logging.h"
52202
52203
52204-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
52205+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
52206
52207
52208 static void scsi_host_cls_release(struct device *dev)
52209@@ -392,7 +392,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
52210 * subtract one because we increment first then return, but we need to
52211 * know what the next host number was before increment
52212 */
52213- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
52214+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
52215 shost->dma_channel = 0xff;
52216
52217 /* These three are default values which can be overridden */
52218diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
52219index a1cfbd3..d7f8ebc 100644
52220--- a/drivers/scsi/hpsa.c
52221+++ b/drivers/scsi/hpsa.c
52222@@ -697,10 +697,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
52223 struct reply_queue_buffer *rq = &h->reply_queue[q];
52224
52225 if (h->transMethod & CFGTBL_Trans_io_accel1)
52226- return h->access.command_completed(h, q);
52227+ return h->access->command_completed(h, q);
52228
52229 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
52230- return h->access.command_completed(h, q);
52231+ return h->access->command_completed(h, q);
52232
52233 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
52234 a = rq->head[rq->current_entry];
52235@@ -837,7 +837,7 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
52236 break;
52237 default:
52238 set_performant_mode(h, c);
52239- h->access.submit_command(h, c);
52240+ h->access->submit_command(h, c);
52241 }
52242 }
52243
52244@@ -5369,17 +5369,17 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)
52245
52246 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
52247 {
52248- return h->access.command_completed(h, q);
52249+ return h->access->command_completed(h, q);
52250 }
52251
52252 static inline bool interrupt_pending(struct ctlr_info *h)
52253 {
52254- return h->access.intr_pending(h);
52255+ return h->access->intr_pending(h);
52256 }
52257
52258 static inline long interrupt_not_for_us(struct ctlr_info *h)
52259 {
52260- return (h->access.intr_pending(h) == 0) ||
52261+ return (h->access->intr_pending(h) == 0) ||
52262 (h->interrupts_enabled == 0);
52263 }
52264
52265@@ -6270,7 +6270,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
52266 if (prod_index < 0)
52267 return prod_index;
52268 h->product_name = products[prod_index].product_name;
52269- h->access = *(products[prod_index].access);
52270+ h->access = products[prod_index].access;
52271
52272 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
52273 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
52274@@ -6649,7 +6649,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
52275 unsigned long flags;
52276 u32 lockup_detected;
52277
52278- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52279+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52280 spin_lock_irqsave(&h->lock, flags);
52281 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
52282 if (!lockup_detected) {
52283@@ -6924,7 +6924,7 @@ reinit_after_soft_reset:
52284 }
52285
52286 /* make sure the board interrupts are off */
52287- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52288+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52289
52290 if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
52291 goto clean2;
52292@@ -6960,7 +6960,7 @@ reinit_after_soft_reset:
52293 * fake ones to scoop up any residual completions.
52294 */
52295 spin_lock_irqsave(&h->lock, flags);
52296- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52297+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52298 spin_unlock_irqrestore(&h->lock, flags);
52299 hpsa_free_irqs(h);
52300 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
52301@@ -6979,9 +6979,9 @@ reinit_after_soft_reset:
52302 dev_info(&h->pdev->dev, "Board READY.\n");
52303 dev_info(&h->pdev->dev,
52304 "Waiting for stale completions to drain.\n");
52305- h->access.set_intr_mask(h, HPSA_INTR_ON);
52306+ h->access->set_intr_mask(h, HPSA_INTR_ON);
52307 msleep(10000);
52308- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52309+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52310
52311 rc = controller_reset_failed(h->cfgtable);
52312 if (rc)
52313@@ -7006,7 +7006,7 @@ reinit_after_soft_reset:
52314
52315
52316 /* Turn the interrupts on so we can service requests */
52317- h->access.set_intr_mask(h, HPSA_INTR_ON);
52318+ h->access->set_intr_mask(h, HPSA_INTR_ON);
52319
52320 hpsa_hba_inquiry(h);
52321 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
52322@@ -7079,7 +7079,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
52323 * To write all data in the battery backed cache to disks
52324 */
52325 hpsa_flush_cache(h);
52326- h->access.set_intr_mask(h, HPSA_INTR_OFF);
52327+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
52328 hpsa_free_irqs_and_disable_msix(h);
52329 }
52330
52331@@ -7200,7 +7200,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52332 CFGTBL_Trans_enable_directed_msix |
52333 (trans_support & (CFGTBL_Trans_io_accel1 |
52334 CFGTBL_Trans_io_accel2));
52335- struct access_method access = SA5_performant_access;
52336+ struct access_method *access = &SA5_performant_access;
52337
52338 /* This is a bit complicated. There are 8 registers on
52339 * the controller which we write to to tell it 8 different
52340@@ -7242,7 +7242,7 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52341 * perform the superfluous readl() after each command submission.
52342 */
52343 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
52344- access = SA5_performant_access_no_read;
52345+ access = &SA5_performant_access_no_read;
52346
52347 /* Controller spec: zero out this buffer. */
52348 for (i = 0; i < h->nreply_queues; i++)
52349@@ -7272,12 +7272,12 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
52350 * enable outbound interrupt coalescing in accelerator mode;
52351 */
52352 if (trans_support & CFGTBL_Trans_io_accel1) {
52353- access = SA5_ioaccel_mode1_access;
52354+ access = &SA5_ioaccel_mode1_access;
52355 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52356 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52357 } else {
52358 if (trans_support & CFGTBL_Trans_io_accel2) {
52359- access = SA5_ioaccel_mode2_access;
52360+ access = &SA5_ioaccel_mode2_access;
52361 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
52362 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
52363 }
52364diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
52365index 6577130..955f9a4 100644
52366--- a/drivers/scsi/hpsa.h
52367+++ b/drivers/scsi/hpsa.h
52368@@ -143,7 +143,7 @@ struct ctlr_info {
52369 unsigned int msix_vector;
52370 unsigned int msi_vector;
52371 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
52372- struct access_method access;
52373+ struct access_method *access;
52374 char hba_mode_enabled;
52375
52376 /* queue and queue Info */
52377@@ -525,38 +525,38 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
52378 }
52379
52380 static struct access_method SA5_access = {
52381- SA5_submit_command,
52382- SA5_intr_mask,
52383- SA5_intr_pending,
52384- SA5_completed,
52385+ .submit_command = SA5_submit_command,
52386+ .set_intr_mask = SA5_intr_mask,
52387+ .intr_pending = SA5_intr_pending,
52388+ .command_completed = SA5_completed,
52389 };
52390
52391 static struct access_method SA5_ioaccel_mode1_access = {
52392- SA5_submit_command,
52393- SA5_performant_intr_mask,
52394- SA5_ioaccel_mode1_intr_pending,
52395- SA5_ioaccel_mode1_completed,
52396+ .submit_command = SA5_submit_command,
52397+ .set_intr_mask = SA5_performant_intr_mask,
52398+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
52399+ .command_completed = SA5_ioaccel_mode1_completed,
52400 };
52401
52402 static struct access_method SA5_ioaccel_mode2_access = {
52403- SA5_submit_command_ioaccel2,
52404- SA5_performant_intr_mask,
52405- SA5_performant_intr_pending,
52406- SA5_performant_completed,
52407+ .submit_command = SA5_submit_command_ioaccel2,
52408+ .set_intr_mask = SA5_performant_intr_mask,
52409+ .intr_pending = SA5_performant_intr_pending,
52410+ .command_completed = SA5_performant_completed,
52411 };
52412
52413 static struct access_method SA5_performant_access = {
52414- SA5_submit_command,
52415- SA5_performant_intr_mask,
52416- SA5_performant_intr_pending,
52417- SA5_performant_completed,
52418+ .submit_command = SA5_submit_command,
52419+ .set_intr_mask = SA5_performant_intr_mask,
52420+ .intr_pending = SA5_performant_intr_pending,
52421+ .command_completed = SA5_performant_completed,
52422 };
52423
52424 static struct access_method SA5_performant_access_no_read = {
52425- SA5_submit_command_no_read,
52426- SA5_performant_intr_mask,
52427- SA5_performant_intr_pending,
52428- SA5_performant_completed,
52429+ .submit_command = SA5_submit_command_no_read,
52430+ .set_intr_mask = SA5_performant_intr_mask,
52431+ .intr_pending = SA5_performant_intr_pending,
52432+ .command_completed = SA5_performant_completed,
52433 };
52434
52435 struct board_type {
52436diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
52437index 1b3a094..068e683 100644
52438--- a/drivers/scsi/libfc/fc_exch.c
52439+++ b/drivers/scsi/libfc/fc_exch.c
52440@@ -101,12 +101,12 @@ struct fc_exch_mgr {
52441 u16 pool_max_index;
52442
52443 struct {
52444- atomic_t no_free_exch;
52445- atomic_t no_free_exch_xid;
52446- atomic_t xid_not_found;
52447- atomic_t xid_busy;
52448- atomic_t seq_not_found;
52449- atomic_t non_bls_resp;
52450+ atomic_unchecked_t no_free_exch;
52451+ atomic_unchecked_t no_free_exch_xid;
52452+ atomic_unchecked_t xid_not_found;
52453+ atomic_unchecked_t xid_busy;
52454+ atomic_unchecked_t seq_not_found;
52455+ atomic_unchecked_t non_bls_resp;
52456 } stats;
52457 };
52458
52459@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
52460 /* allocate memory for exchange */
52461 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
52462 if (!ep) {
52463- atomic_inc(&mp->stats.no_free_exch);
52464+ atomic_inc_unchecked(&mp->stats.no_free_exch);
52465 goto out;
52466 }
52467 memset(ep, 0, sizeof(*ep));
52468@@ -874,7 +874,7 @@ out:
52469 return ep;
52470 err:
52471 spin_unlock_bh(&pool->lock);
52472- atomic_inc(&mp->stats.no_free_exch_xid);
52473+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
52474 mempool_free(ep, mp->ep_pool);
52475 return NULL;
52476 }
52477@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52478 xid = ntohs(fh->fh_ox_id); /* we originated exch */
52479 ep = fc_exch_find(mp, xid);
52480 if (!ep) {
52481- atomic_inc(&mp->stats.xid_not_found);
52482+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52483 reject = FC_RJT_OX_ID;
52484 goto out;
52485 }
52486@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52487 ep = fc_exch_find(mp, xid);
52488 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
52489 if (ep) {
52490- atomic_inc(&mp->stats.xid_busy);
52491+ atomic_inc_unchecked(&mp->stats.xid_busy);
52492 reject = FC_RJT_RX_ID;
52493 goto rel;
52494 }
52495@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52496 }
52497 xid = ep->xid; /* get our XID */
52498 } else if (!ep) {
52499- atomic_inc(&mp->stats.xid_not_found);
52500+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52501 reject = FC_RJT_RX_ID; /* XID not found */
52502 goto out;
52503 }
52504@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
52505 } else {
52506 sp = &ep->seq;
52507 if (sp->id != fh->fh_seq_id) {
52508- atomic_inc(&mp->stats.seq_not_found);
52509+ atomic_inc_unchecked(&mp->stats.seq_not_found);
52510 if (f_ctl & FC_FC_END_SEQ) {
52511 /*
52512 * Update sequence_id based on incoming last
52513@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52514
52515 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
52516 if (!ep) {
52517- atomic_inc(&mp->stats.xid_not_found);
52518+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52519 goto out;
52520 }
52521 if (ep->esb_stat & ESB_ST_COMPLETE) {
52522- atomic_inc(&mp->stats.xid_not_found);
52523+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52524 goto rel;
52525 }
52526 if (ep->rxid == FC_XID_UNKNOWN)
52527 ep->rxid = ntohs(fh->fh_rx_id);
52528 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
52529- atomic_inc(&mp->stats.xid_not_found);
52530+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52531 goto rel;
52532 }
52533 if (ep->did != ntoh24(fh->fh_s_id) &&
52534 ep->did != FC_FID_FLOGI) {
52535- atomic_inc(&mp->stats.xid_not_found);
52536+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52537 goto rel;
52538 }
52539 sof = fr_sof(fp);
52540@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52541 sp->ssb_stat |= SSB_ST_RESP;
52542 sp->id = fh->fh_seq_id;
52543 } else if (sp->id != fh->fh_seq_id) {
52544- atomic_inc(&mp->stats.seq_not_found);
52545+ atomic_inc_unchecked(&mp->stats.seq_not_found);
52546 goto rel;
52547 }
52548
52549@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
52550 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
52551
52552 if (!sp)
52553- atomic_inc(&mp->stats.xid_not_found);
52554+ atomic_inc_unchecked(&mp->stats.xid_not_found);
52555 else
52556- atomic_inc(&mp->stats.non_bls_resp);
52557+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
52558
52559 fc_frame_free(fp);
52560 }
52561@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
52562
52563 list_for_each_entry(ema, &lport->ema_list, ema_list) {
52564 mp = ema->mp;
52565- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
52566+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
52567 st->fc_no_free_exch_xid +=
52568- atomic_read(&mp->stats.no_free_exch_xid);
52569- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
52570- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
52571- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
52572- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
52573+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
52574+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
52575+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
52576+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
52577+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
52578 }
52579 }
52580 EXPORT_SYMBOL(fc_exch_update_stats);
52581diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
52582index 9c706d8..d3e3ed2 100644
52583--- a/drivers/scsi/libsas/sas_ata.c
52584+++ b/drivers/scsi/libsas/sas_ata.c
52585@@ -535,7 +535,7 @@ static struct ata_port_operations sas_sata_ops = {
52586 .postreset = ata_std_postreset,
52587 .error_handler = ata_std_error_handler,
52588 .post_internal_cmd = sas_ata_post_internal,
52589- .qc_defer = ata_std_qc_defer,
52590+ .qc_defer = ata_std_qc_defer,
52591 .qc_prep = ata_noop_qc_prep,
52592 .qc_issue = sas_ata_qc_issue,
52593 .qc_fill_rtf = sas_ata_qc_fill_rtf,
52594diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
52595index 434e903..5a4a79b 100644
52596--- a/drivers/scsi/lpfc/lpfc.h
52597+++ b/drivers/scsi/lpfc/lpfc.h
52598@@ -430,7 +430,7 @@ struct lpfc_vport {
52599 struct dentry *debug_nodelist;
52600 struct dentry *vport_debugfs_root;
52601 struct lpfc_debugfs_trc *disc_trc;
52602- atomic_t disc_trc_cnt;
52603+ atomic_unchecked_t disc_trc_cnt;
52604 #endif
52605 uint8_t stat_data_enabled;
52606 uint8_t stat_data_blocked;
52607@@ -880,8 +880,8 @@ struct lpfc_hba {
52608 struct timer_list fabric_block_timer;
52609 unsigned long bit_flags;
52610 #define FABRIC_COMANDS_BLOCKED 0
52611- atomic_t num_rsrc_err;
52612- atomic_t num_cmd_success;
52613+ atomic_unchecked_t num_rsrc_err;
52614+ atomic_unchecked_t num_cmd_success;
52615 unsigned long last_rsrc_error_time;
52616 unsigned long last_ramp_down_time;
52617 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
52618@@ -916,7 +916,7 @@ struct lpfc_hba {
52619
52620 struct dentry *debug_slow_ring_trc;
52621 struct lpfc_debugfs_trc *slow_ring_trc;
52622- atomic_t slow_ring_trc_cnt;
52623+ atomic_unchecked_t slow_ring_trc_cnt;
52624 /* iDiag debugfs sub-directory */
52625 struct dentry *idiag_root;
52626 struct dentry *idiag_pci_cfg;
52627diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
52628index 5633e7d..8272114 100644
52629--- a/drivers/scsi/lpfc/lpfc_debugfs.c
52630+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
52631@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
52632
52633 #include <linux/debugfs.h>
52634
52635-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52636+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
52637 static unsigned long lpfc_debugfs_start_time = 0L;
52638
52639 /* iDiag */
52640@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
52641 lpfc_debugfs_enable = 0;
52642
52643 len = 0;
52644- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
52645+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
52646 (lpfc_debugfs_max_disc_trc - 1);
52647 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
52648 dtp = vport->disc_trc + i;
52649@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
52650 lpfc_debugfs_enable = 0;
52651
52652 len = 0;
52653- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
52654+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
52655 (lpfc_debugfs_max_slow_ring_trc - 1);
52656 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
52657 dtp = phba->slow_ring_trc + i;
52658@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
52659 !vport || !vport->disc_trc)
52660 return;
52661
52662- index = atomic_inc_return(&vport->disc_trc_cnt) &
52663+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
52664 (lpfc_debugfs_max_disc_trc - 1);
52665 dtp = vport->disc_trc + index;
52666 dtp->fmt = fmt;
52667 dtp->data1 = data1;
52668 dtp->data2 = data2;
52669 dtp->data3 = data3;
52670- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52671+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52672 dtp->jif = jiffies;
52673 #endif
52674 return;
52675@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
52676 !phba || !phba->slow_ring_trc)
52677 return;
52678
52679- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
52680+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
52681 (lpfc_debugfs_max_slow_ring_trc - 1);
52682 dtp = phba->slow_ring_trc + index;
52683 dtp->fmt = fmt;
52684 dtp->data1 = data1;
52685 dtp->data2 = data2;
52686 dtp->data3 = data3;
52687- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
52688+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
52689 dtp->jif = jiffies;
52690 #endif
52691 return;
52692@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52693 "slow_ring buffer\n");
52694 goto debug_failed;
52695 }
52696- atomic_set(&phba->slow_ring_trc_cnt, 0);
52697+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
52698 memset(phba->slow_ring_trc, 0,
52699 (sizeof(struct lpfc_debugfs_trc) *
52700 lpfc_debugfs_max_slow_ring_trc));
52701@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
52702 "buffer\n");
52703 goto debug_failed;
52704 }
52705- atomic_set(&vport->disc_trc_cnt, 0);
52706+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
52707
52708 snprintf(name, sizeof(name), "discovery_trace");
52709 vport->debug_disc_trc =
52710diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
52711index 0b2c53a..aec2b45 100644
52712--- a/drivers/scsi/lpfc/lpfc_init.c
52713+++ b/drivers/scsi/lpfc/lpfc_init.c
52714@@ -11290,8 +11290,10 @@ lpfc_init(void)
52715 "misc_register returned with status %d", error);
52716
52717 if (lpfc_enable_npiv) {
52718- lpfc_transport_functions.vport_create = lpfc_vport_create;
52719- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52720+ pax_open_kernel();
52721+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
52722+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
52723+ pax_close_kernel();
52724 }
52725 lpfc_transport_template =
52726 fc_attach_transport(&lpfc_transport_functions);
52727diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
52728index 4f9222e..f1850e3 100644
52729--- a/drivers/scsi/lpfc/lpfc_scsi.c
52730+++ b/drivers/scsi/lpfc/lpfc_scsi.c
52731@@ -261,7 +261,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
52732 unsigned long expires;
52733
52734 spin_lock_irqsave(&phba->hbalock, flags);
52735- atomic_inc(&phba->num_rsrc_err);
52736+ atomic_inc_unchecked(&phba->num_rsrc_err);
52737 phba->last_rsrc_error_time = jiffies;
52738
52739 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
52740@@ -303,8 +303,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52741 unsigned long num_rsrc_err, num_cmd_success;
52742 int i;
52743
52744- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
52745- num_cmd_success = atomic_read(&phba->num_cmd_success);
52746+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
52747+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
52748
52749 /*
52750 * The error and success command counters are global per
52751@@ -331,8 +331,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
52752 }
52753 }
52754 lpfc_destroy_vport_work_array(phba, vports);
52755- atomic_set(&phba->num_rsrc_err, 0);
52756- atomic_set(&phba->num_cmd_success, 0);
52757+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
52758+ atomic_set_unchecked(&phba->num_cmd_success, 0);
52759 }
52760
52761 /**
52762diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52763index 3f26147..ee8efd1 100644
52764--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52765+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
52766@@ -1509,7 +1509,7 @@ _scsih_get_resync(struct device *dev)
52767 {
52768 struct scsi_device *sdev = to_scsi_device(dev);
52769 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
52770- static struct _raid_device *raid_device;
52771+ struct _raid_device *raid_device;
52772 unsigned long flags;
52773 Mpi2RaidVolPage0_t vol_pg0;
52774 Mpi2ConfigReply_t mpi_reply;
52775@@ -1561,7 +1561,7 @@ _scsih_get_state(struct device *dev)
52776 {
52777 struct scsi_device *sdev = to_scsi_device(dev);
52778 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
52779- static struct _raid_device *raid_device;
52780+ struct _raid_device *raid_device;
52781 unsigned long flags;
52782 Mpi2RaidVolPage0_t vol_pg0;
52783 Mpi2ConfigReply_t mpi_reply;
52784@@ -6641,7 +6641,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
52785 Mpi2EventDataIrOperationStatus_t *event_data =
52786 (Mpi2EventDataIrOperationStatus_t *)
52787 fw_event->event_data;
52788- static struct _raid_device *raid_device;
52789+ struct _raid_device *raid_device;
52790 unsigned long flags;
52791 u16 handle;
52792
52793@@ -7112,7 +7112,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
52794 u64 sas_address;
52795 struct _sas_device *sas_device;
52796 struct _sas_node *expander_device;
52797- static struct _raid_device *raid_device;
52798+ struct _raid_device *raid_device;
52799 u8 retry_count;
52800 unsigned long flags;
52801
52802diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
52803index ed31d8c..ab856b3 100644
52804--- a/drivers/scsi/pmcraid.c
52805+++ b/drivers/scsi/pmcraid.c
52806@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
52807 res->scsi_dev = scsi_dev;
52808 scsi_dev->hostdata = res;
52809 res->change_detected = 0;
52810- atomic_set(&res->read_failures, 0);
52811- atomic_set(&res->write_failures, 0);
52812+ atomic_set_unchecked(&res->read_failures, 0);
52813+ atomic_set_unchecked(&res->write_failures, 0);
52814 rc = 0;
52815 }
52816 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
52817@@ -2640,9 +2640,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
52818
52819 /* If this was a SCSI read/write command keep count of errors */
52820 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
52821- atomic_inc(&res->read_failures);
52822+ atomic_inc_unchecked(&res->read_failures);
52823 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
52824- atomic_inc(&res->write_failures);
52825+ atomic_inc_unchecked(&res->write_failures);
52826
52827 if (!RES_IS_GSCSI(res->cfg_entry) &&
52828 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
52829@@ -3468,7 +3468,7 @@ static int pmcraid_queuecommand_lck(
52830 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
52831 * hrrq_id assigned here in queuecommand
52832 */
52833- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
52834+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
52835 pinstance->num_hrrq;
52836 cmd->cmd_done = pmcraid_io_done;
52837
52838@@ -3782,7 +3782,7 @@ static long pmcraid_ioctl_passthrough(
52839 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
52840 * hrrq_id assigned here in queuecommand
52841 */
52842- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
52843+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
52844 pinstance->num_hrrq;
52845
52846 if (request_size) {
52847@@ -4420,7 +4420,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
52848
52849 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
52850 /* add resources only after host is added into system */
52851- if (!atomic_read(&pinstance->expose_resources))
52852+ if (!atomic_read_unchecked(&pinstance->expose_resources))
52853 return;
52854
52855 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
52856@@ -5237,8 +5237,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
52857 init_waitqueue_head(&pinstance->reset_wait_q);
52858
52859 atomic_set(&pinstance->outstanding_cmds, 0);
52860- atomic_set(&pinstance->last_message_id, 0);
52861- atomic_set(&pinstance->expose_resources, 0);
52862+ atomic_set_unchecked(&pinstance->last_message_id, 0);
52863+ atomic_set_unchecked(&pinstance->expose_resources, 0);
52864
52865 INIT_LIST_HEAD(&pinstance->free_res_q);
52866 INIT_LIST_HEAD(&pinstance->used_res_q);
52867@@ -5951,7 +5951,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
52868 /* Schedule worker thread to handle CCN and take care of adding and
52869 * removing devices to OS
52870 */
52871- atomic_set(&pinstance->expose_resources, 1);
52872+ atomic_set_unchecked(&pinstance->expose_resources, 1);
52873 schedule_work(&pinstance->worker_q);
52874 return rc;
52875
52876diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
52877index e1d150f..6c6df44 100644
52878--- a/drivers/scsi/pmcraid.h
52879+++ b/drivers/scsi/pmcraid.h
52880@@ -748,7 +748,7 @@ struct pmcraid_instance {
52881 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
52882
52883 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
52884- atomic_t last_message_id;
52885+ atomic_unchecked_t last_message_id;
52886
52887 /* configuration table */
52888 struct pmcraid_config_table *cfg_table;
52889@@ -777,7 +777,7 @@ struct pmcraid_instance {
52890 atomic_t outstanding_cmds;
52891
52892 /* should add/delete resources to mid-layer now ?*/
52893- atomic_t expose_resources;
52894+ atomic_unchecked_t expose_resources;
52895
52896
52897
52898@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
52899 struct pmcraid_config_table_entry_ext cfg_entry_ext;
52900 };
52901 struct scsi_device *scsi_dev; /* Link scsi_device structure */
52902- atomic_t read_failures; /* count of failed READ commands */
52903- atomic_t write_failures; /* count of failed WRITE commands */
52904+ atomic_unchecked_t read_failures; /* count of failed READ commands */
52905+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
52906
52907 /* To indicate add/delete/modify during CCN */
52908 u8 change_detected;
52909diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
52910index 82b92c4..3178171 100644
52911--- a/drivers/scsi/qla2xxx/qla_attr.c
52912+++ b/drivers/scsi/qla2xxx/qla_attr.c
52913@@ -2192,7 +2192,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
52914 return 0;
52915 }
52916
52917-struct fc_function_template qla2xxx_transport_functions = {
52918+fc_function_template_no_const qla2xxx_transport_functions = {
52919
52920 .show_host_node_name = 1,
52921 .show_host_port_name = 1,
52922@@ -2240,7 +2240,7 @@ struct fc_function_template qla2xxx_transport_functions = {
52923 .bsg_timeout = qla24xx_bsg_timeout,
52924 };
52925
52926-struct fc_function_template qla2xxx_transport_vport_functions = {
52927+fc_function_template_no_const qla2xxx_transport_vport_functions = {
52928
52929 .show_host_node_name = 1,
52930 .show_host_port_name = 1,
52931diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
52932index 7686bfe..4710893 100644
52933--- a/drivers/scsi/qla2xxx/qla_gbl.h
52934+++ b/drivers/scsi/qla2xxx/qla_gbl.h
52935@@ -571,8 +571,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
52936 struct device_attribute;
52937 extern struct device_attribute *qla2x00_host_attrs[];
52938 struct fc_function_template;
52939-extern struct fc_function_template qla2xxx_transport_functions;
52940-extern struct fc_function_template qla2xxx_transport_vport_functions;
52941+extern fc_function_template_no_const qla2xxx_transport_functions;
52942+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
52943 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
52944 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
52945 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
52946diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
52947index cce1cbc..5b9f0fe 100644
52948--- a/drivers/scsi/qla2xxx/qla_os.c
52949+++ b/drivers/scsi/qla2xxx/qla_os.c
52950@@ -1435,8 +1435,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
52951 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
52952 /* Ok, a 64bit DMA mask is applicable. */
52953 ha->flags.enable_64bit_addressing = 1;
52954- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
52955- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
52956+ pax_open_kernel();
52957+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
52958+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
52959+ pax_close_kernel();
52960 return;
52961 }
52962 }
52963diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
52964index 8f6d0fb..1b21097 100644
52965--- a/drivers/scsi/qla4xxx/ql4_def.h
52966+++ b/drivers/scsi/qla4xxx/ql4_def.h
52967@@ -305,7 +305,7 @@ struct ddb_entry {
52968 * (4000 only) */
52969 atomic_t relogin_timer; /* Max Time to wait for
52970 * relogin to complete */
52971- atomic_t relogin_retry_count; /* Num of times relogin has been
52972+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
52973 * retried */
52974 uint32_t default_time2wait; /* Default Min time between
52975 * relogins (+aens) */
52976diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
52977index 6d25879..3031a9f 100644
52978--- a/drivers/scsi/qla4xxx/ql4_os.c
52979+++ b/drivers/scsi/qla4xxx/ql4_os.c
52980@@ -4491,12 +4491,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
52981 */
52982 if (!iscsi_is_session_online(cls_sess)) {
52983 /* Reset retry relogin timer */
52984- atomic_inc(&ddb_entry->relogin_retry_count);
52985+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
52986 DEBUG2(ql4_printk(KERN_INFO, ha,
52987 "%s: index[%d] relogin timed out-retrying"
52988 " relogin (%d), retry (%d)\n", __func__,
52989 ddb_entry->fw_ddb_index,
52990- atomic_read(&ddb_entry->relogin_retry_count),
52991+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
52992 ddb_entry->default_time2wait + 4));
52993 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
52994 atomic_set(&ddb_entry->retry_relogin_timer,
52995@@ -6604,7 +6604,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
52996
52997 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
52998 atomic_set(&ddb_entry->relogin_timer, 0);
52999- atomic_set(&ddb_entry->relogin_retry_count, 0);
53000+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
53001 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
53002 ddb_entry->default_relogin_timeout =
53003 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
53004diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
53005index c9c3b57..22a8e41 100644
53006--- a/drivers/scsi/scsi.c
53007+++ b/drivers/scsi/scsi.c
53008@@ -637,7 +637,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
53009
53010 good_bytes = scsi_bufflen(cmd);
53011 if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) {
53012- int old_good_bytes = good_bytes;
53013+ unsigned int old_good_bytes = good_bytes;
53014 drv = scsi_cmd_to_driver(cmd);
53015 if (drv->done)
53016 good_bytes = drv->done(cmd);
53017diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
53018index b1a2631..5bcd9c8 100644
53019--- a/drivers/scsi/scsi_lib.c
53020+++ b/drivers/scsi/scsi_lib.c
53021@@ -1597,7 +1597,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
53022 shost = sdev->host;
53023 scsi_init_cmd_errh(cmd);
53024 cmd->result = DID_NO_CONNECT << 16;
53025- atomic_inc(&cmd->device->iorequest_cnt);
53026+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
53027
53028 /*
53029 * SCSI request completion path will do scsi_device_unbusy(),
53030@@ -1620,9 +1620,9 @@ static void scsi_softirq_done(struct request *rq)
53031
53032 INIT_LIST_HEAD(&cmd->eh_entry);
53033
53034- atomic_inc(&cmd->device->iodone_cnt);
53035+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
53036 if (cmd->result)
53037- atomic_inc(&cmd->device->ioerr_cnt);
53038+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
53039
53040 disposition = scsi_decide_disposition(cmd);
53041 if (disposition != SUCCESS &&
53042@@ -1663,7 +1663,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
53043 struct Scsi_Host *host = cmd->device->host;
53044 int rtn = 0;
53045
53046- atomic_inc(&cmd->device->iorequest_cnt);
53047+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
53048
53049 /* check if the device is still usable */
53050 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
53051diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
53052index 1ac38e7..6acc656 100644
53053--- a/drivers/scsi/scsi_sysfs.c
53054+++ b/drivers/scsi/scsi_sysfs.c
53055@@ -788,7 +788,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
53056 char *buf) \
53057 { \
53058 struct scsi_device *sdev = to_scsi_device(dev); \
53059- unsigned long long count = atomic_read(&sdev->field); \
53060+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
53061 return snprintf(buf, 20, "0x%llx\n", count); \
53062 } \
53063 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
53064diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
53065index 5d6f348..18778a6b 100644
53066--- a/drivers/scsi/scsi_transport_fc.c
53067+++ b/drivers/scsi/scsi_transport_fc.c
53068@@ -501,7 +501,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
53069 * Netlink Infrastructure
53070 */
53071
53072-static atomic_t fc_event_seq;
53073+static atomic_unchecked_t fc_event_seq;
53074
53075 /**
53076 * fc_get_event_number - Obtain the next sequential FC event number
53077@@ -514,7 +514,7 @@ static atomic_t fc_event_seq;
53078 u32
53079 fc_get_event_number(void)
53080 {
53081- return atomic_add_return(1, &fc_event_seq);
53082+ return atomic_add_return_unchecked(1, &fc_event_seq);
53083 }
53084 EXPORT_SYMBOL(fc_get_event_number);
53085
53086@@ -658,7 +658,7 @@ static __init int fc_transport_init(void)
53087 {
53088 int error;
53089
53090- atomic_set(&fc_event_seq, 0);
53091+ atomic_set_unchecked(&fc_event_seq, 0);
53092
53093 error = transport_class_register(&fc_host_class);
53094 if (error)
53095@@ -848,7 +848,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
53096 char *cp;
53097
53098 *val = simple_strtoul(buf, &cp, 0);
53099- if ((*cp && (*cp != '\n')) || (*val < 0))
53100+ if (*cp && (*cp != '\n'))
53101 return -EINVAL;
53102 /*
53103 * Check for overflow; dev_loss_tmo is u32
53104diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
53105index 67d43e3..8cee73c 100644
53106--- a/drivers/scsi/scsi_transport_iscsi.c
53107+++ b/drivers/scsi/scsi_transport_iscsi.c
53108@@ -79,7 +79,7 @@ struct iscsi_internal {
53109 struct transport_container session_cont;
53110 };
53111
53112-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
53113+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
53114 static struct workqueue_struct *iscsi_eh_timer_workq;
53115
53116 static DEFINE_IDA(iscsi_sess_ida);
53117@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
53118 int err;
53119
53120 ihost = shost->shost_data;
53121- session->sid = atomic_add_return(1, &iscsi_session_nr);
53122+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
53123
53124 if (target_id == ISCSI_MAX_TARGET) {
53125 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
53126@@ -4515,7 +4515,7 @@ static __init int iscsi_transport_init(void)
53127 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
53128 ISCSI_TRANSPORT_VERSION);
53129
53130- atomic_set(&iscsi_session_nr, 0);
53131+ atomic_set_unchecked(&iscsi_session_nr, 0);
53132
53133 err = class_register(&iscsi_transport_class);
53134 if (err)
53135diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
53136index ae45bd9..c32a586 100644
53137--- a/drivers/scsi/scsi_transport_srp.c
53138+++ b/drivers/scsi/scsi_transport_srp.c
53139@@ -35,7 +35,7 @@
53140 #include "scsi_priv.h"
53141
53142 struct srp_host_attrs {
53143- atomic_t next_port_id;
53144+ atomic_unchecked_t next_port_id;
53145 };
53146 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
53147
53148@@ -100,7 +100,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
53149 struct Scsi_Host *shost = dev_to_shost(dev);
53150 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
53151
53152- atomic_set(&srp_host->next_port_id, 0);
53153+ atomic_set_unchecked(&srp_host->next_port_id, 0);
53154 return 0;
53155 }
53156
53157@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
53158 rport_fast_io_fail_timedout);
53159 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
53160
53161- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
53162+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
53163 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
53164
53165 transport_setup_device(&rport->dev);
53166diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
53167index a661d33..1b233fa 100644
53168--- a/drivers/scsi/sd.c
53169+++ b/drivers/scsi/sd.c
53170@@ -111,7 +111,7 @@ static int sd_resume(struct device *);
53171 static void sd_rescan(struct device *);
53172 static int sd_init_command(struct scsi_cmnd *SCpnt);
53173 static void sd_uninit_command(struct scsi_cmnd *SCpnt);
53174-static int sd_done(struct scsi_cmnd *);
53175+static unsigned int sd_done(struct scsi_cmnd *);
53176 static int sd_eh_action(struct scsi_cmnd *, int);
53177 static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
53178 static void scsi_disk_release(struct device *cdev);
53179@@ -1670,7 +1670,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
53180 *
53181 * Note: potentially run from within an ISR. Must not block.
53182 **/
53183-static int sd_done(struct scsi_cmnd *SCpnt)
53184+static unsigned int sd_done(struct scsi_cmnd *SCpnt)
53185 {
53186 int result = SCpnt->result;
53187 unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
53188@@ -2997,7 +2997,7 @@ static int sd_probe(struct device *dev)
53189 sdkp->disk = gd;
53190 sdkp->index = index;
53191 atomic_set(&sdkp->openers, 0);
53192- atomic_set(&sdkp->device->ioerr_cnt, 0);
53193+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
53194
53195 if (!sdp->request_queue->rq_timeout) {
53196 if (sdp->type != TYPE_MOD)
53197diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
53198index 2270bd5..98408a5 100644
53199--- a/drivers/scsi/sg.c
53200+++ b/drivers/scsi/sg.c
53201@@ -1083,7 +1083,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
53202 sdp->disk->disk_name,
53203 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
53204 NULL,
53205- (char *)arg);
53206+ (char __user *)arg);
53207 case BLKTRACESTART:
53208 return blk_trace_startstop(sdp->device->request_queue, 1);
53209 case BLKTRACESTOP:
53210diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
53211index 8bd54a6..dd037a5 100644
53212--- a/drivers/scsi/sr.c
53213+++ b/drivers/scsi/sr.c
53214@@ -80,7 +80,7 @@ static DEFINE_MUTEX(sr_mutex);
53215 static int sr_probe(struct device *);
53216 static int sr_remove(struct device *);
53217 static int sr_init_command(struct scsi_cmnd *SCpnt);
53218-static int sr_done(struct scsi_cmnd *);
53219+static unsigned int sr_done(struct scsi_cmnd *);
53220 static int sr_runtime_suspend(struct device *dev);
53221
53222 static struct dev_pm_ops sr_pm_ops = {
53223@@ -312,11 +312,11 @@ do_tur:
53224 * It will be notified on the end of a SCSI read / write, and will take one
53225 * of several actions based on success or failure.
53226 */
53227-static int sr_done(struct scsi_cmnd *SCpnt)
53228+static unsigned int sr_done(struct scsi_cmnd *SCpnt)
53229 {
53230 int result = SCpnt->result;
53231- int this_count = scsi_bufflen(SCpnt);
53232- int good_bytes = (result == 0 ? this_count : 0);
53233+ unsigned int this_count = scsi_bufflen(SCpnt);
53234+ unsigned int good_bytes = (result == 0 ? this_count : 0);
53235 int block_sectors = 0;
53236 long error_sector;
53237 struct scsi_cd *cd = scsi_cd(SCpnt->request->rq_disk);
53238diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
53239index c0d660f..24a5854 100644
53240--- a/drivers/soc/tegra/fuse/fuse-tegra.c
53241+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
53242@@ -71,7 +71,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
53243 return i;
53244 }
53245
53246-static struct bin_attribute fuse_bin_attr = {
53247+static bin_attribute_no_const fuse_bin_attr = {
53248 .attr = { .name = "fuse", .mode = S_IRUGO, },
53249 .read = fuse_read,
53250 };
53251diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
53252index 57a1950..ae54e21 100644
53253--- a/drivers/spi/spi.c
53254+++ b/drivers/spi/spi.c
53255@@ -2307,7 +2307,7 @@ int spi_bus_unlock(struct spi_master *master)
53256 EXPORT_SYMBOL_GPL(spi_bus_unlock);
53257
53258 /* portable code must never pass more than 32 bytes */
53259-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
53260+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
53261
53262 static u8 *buf;
53263
53264diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
53265index b41429f..2de5373 100644
53266--- a/drivers/staging/android/timed_output.c
53267+++ b/drivers/staging/android/timed_output.c
53268@@ -25,7 +25,7 @@
53269 #include "timed_output.h"
53270
53271 static struct class *timed_output_class;
53272-static atomic_t device_count;
53273+static atomic_unchecked_t device_count;
53274
53275 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
53276 char *buf)
53277@@ -65,7 +65,7 @@ static int create_timed_output_class(void)
53278 timed_output_class = class_create(THIS_MODULE, "timed_output");
53279 if (IS_ERR(timed_output_class))
53280 return PTR_ERR(timed_output_class);
53281- atomic_set(&device_count, 0);
53282+ atomic_set_unchecked(&device_count, 0);
53283 timed_output_class->dev_groups = timed_output_groups;
53284 }
53285
53286@@ -83,7 +83,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
53287 if (ret < 0)
53288 return ret;
53289
53290- tdev->index = atomic_inc_return(&device_count);
53291+ tdev->index = atomic_inc_return_unchecked(&device_count);
53292 tdev->dev = device_create(timed_output_class, NULL,
53293 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
53294 if (IS_ERR(tdev->dev))
53295diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
53296index 727640e..55bf61c 100644
53297--- a/drivers/staging/comedi/comedi_fops.c
53298+++ b/drivers/staging/comedi/comedi_fops.c
53299@@ -297,8 +297,8 @@ static void comedi_file_reset(struct file *file)
53300 }
53301 cfp->last_attached = dev->attached;
53302 cfp->last_detach_count = dev->detach_count;
53303- ACCESS_ONCE(cfp->read_subdev) = read_s;
53304- ACCESS_ONCE(cfp->write_subdev) = write_s;
53305+ ACCESS_ONCE_RW(cfp->read_subdev) = read_s;
53306+ ACCESS_ONCE_RW(cfp->write_subdev) = write_s;
53307 }
53308
53309 static void comedi_file_check(struct file *file)
53310@@ -1924,7 +1924,7 @@ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
53311 !(s_old->async->cmd.flags & CMDF_WRITE))
53312 return -EBUSY;
53313
53314- ACCESS_ONCE(cfp->read_subdev) = s_new;
53315+ ACCESS_ONCE_RW(cfp->read_subdev) = s_new;
53316 return 0;
53317 }
53318
53319@@ -1966,7 +1966,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
53320 (s_old->async->cmd.flags & CMDF_WRITE))
53321 return -EBUSY;
53322
53323- ACCESS_ONCE(cfp->write_subdev) = s_new;
53324+ ACCESS_ONCE_RW(cfp->write_subdev) = s_new;
53325 return 0;
53326 }
53327
53328diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
53329index 37dcf7e..f3c2016 100644
53330--- a/drivers/staging/fbtft/fbtft-core.c
53331+++ b/drivers/staging/fbtft/fbtft-core.c
53332@@ -689,7 +689,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
53333 {
53334 struct fb_info *info;
53335 struct fbtft_par *par;
53336- struct fb_ops *fbops = NULL;
53337+ fb_ops_no_const *fbops = NULL;
53338 struct fb_deferred_io *fbdefio = NULL;
53339 struct fbtft_platform_data *pdata = dev->platform_data;
53340 u8 *vmem = NULL;
53341diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
53342index 0dbf3f9..fed0063 100644
53343--- a/drivers/staging/fbtft/fbtft.h
53344+++ b/drivers/staging/fbtft/fbtft.h
53345@@ -106,7 +106,7 @@ struct fbtft_ops {
53346
53347 int (*set_var)(struct fbtft_par *par);
53348 int (*set_gamma)(struct fbtft_par *par, unsigned long *curves);
53349-};
53350+} __no_const;
53351
53352 /**
53353 * struct fbtft_display - Describes the display properties
53354diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
53355index 001348c..cfaac8a 100644
53356--- a/drivers/staging/gdm724x/gdm_tty.c
53357+++ b/drivers/staging/gdm724x/gdm_tty.c
53358@@ -44,7 +44,7 @@
53359 #define gdm_tty_send_control(n, r, v, d, l) (\
53360 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
53361
53362-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
53363+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
53364
53365 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
53366 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
53367diff --git a/drivers/staging/i2o/i2o.h b/drivers/staging/i2o/i2o.h
53368index d23c3c2..eb63c81 100644
53369--- a/drivers/staging/i2o/i2o.h
53370+++ b/drivers/staging/i2o/i2o.h
53371@@ -565,7 +565,7 @@ struct i2o_controller {
53372 struct i2o_device *exec; /* Executive */
53373 #if BITS_PER_LONG == 64
53374 spinlock_t context_list_lock; /* lock for context_list */
53375- atomic_t context_list_counter; /* needed for unique contexts */
53376+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
53377 struct list_head context_list; /* list of context id's
53378 and pointers */
53379 #endif
53380diff --git a/drivers/staging/i2o/i2o_proc.c b/drivers/staging/i2o/i2o_proc.c
53381index ad84f33..c5bdf65 100644
53382--- a/drivers/staging/i2o/i2o_proc.c
53383+++ b/drivers/staging/i2o/i2o_proc.c
53384@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
53385 "Array Controller Device"
53386 };
53387
53388-static char *chtostr(char *tmp, u8 *chars, int n)
53389-{
53390- tmp[0] = 0;
53391- return strncat(tmp, (char *)chars, n);
53392-}
53393-
53394 static int i2o_report_query_status(struct seq_file *seq, int block_status,
53395 char *group)
53396 {
53397@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
53398 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
53399 {
53400 struct i2o_controller *c = (struct i2o_controller *)seq->private;
53401- static u32 work32[5];
53402- static u8 *work8 = (u8 *) work32;
53403- static u16 *work16 = (u16 *) work32;
53404+ u32 work32[5];
53405+ u8 *work8 = (u8 *) work32;
53406+ u16 *work16 = (u16 *) work32;
53407 int token;
53408 u32 hwcap;
53409
53410@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
53411 } *result;
53412
53413 i2o_exec_execute_ddm_table ddm_table;
53414- char tmp[28 + 1];
53415
53416 result = kmalloc(sizeof(*result), GFP_KERNEL);
53417 if (!result)
53418@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
53419
53420 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
53421 seq_printf(seq, "%-#8x", ddm_table.module_id);
53422- seq_printf(seq, "%-29s",
53423- chtostr(tmp, ddm_table.module_name_version, 28));
53424+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
53425 seq_printf(seq, "%9d ", ddm_table.data_size);
53426 seq_printf(seq, "%8d", ddm_table.code_size);
53427
53428@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
53429
53430 i2o_driver_result_table *result;
53431 i2o_driver_store_table *dst;
53432- char tmp[28 + 1];
53433
53434 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
53435 if (result == NULL)
53436@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
53437
53438 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
53439 seq_printf(seq, "%-#8x", dst->module_id);
53440- seq_printf(seq, "%-29s",
53441- chtostr(tmp, dst->module_name_version, 28));
53442- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
53443+ seq_printf(seq, "%-.28s", dst->module_name_version);
53444+ seq_printf(seq, "%-.8s", dst->date);
53445 seq_printf(seq, "%8d ", dst->module_size);
53446 seq_printf(seq, "%8d ", dst->mpb_size);
53447 seq_printf(seq, "0x%04x", dst->module_flags);
53448@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
53449 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
53450 {
53451 struct i2o_device *d = (struct i2o_device *)seq->private;
53452- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
53453+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
53454 // == (allow) 512d bytes (max)
53455- static u16 *work16 = (u16 *) work32;
53456+ u16 *work16 = (u16 *) work32;
53457 int token;
53458- char tmp[16 + 1];
53459
53460 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
53461
53462@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
53463 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
53464 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
53465 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
53466- seq_printf(seq, "Vendor info : %s\n",
53467- chtostr(tmp, (u8 *) (work32 + 2), 16));
53468- seq_printf(seq, "Product info : %s\n",
53469- chtostr(tmp, (u8 *) (work32 + 6), 16));
53470- seq_printf(seq, "Description : %s\n",
53471- chtostr(tmp, (u8 *) (work32 + 10), 16));
53472- seq_printf(seq, "Product rev. : %s\n",
53473- chtostr(tmp, (u8 *) (work32 + 14), 8));
53474+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
53475+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
53476+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
53477+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
53478
53479 seq_printf(seq, "Serial number : ");
53480 print_serial_number(seq, (u8 *) (work32 + 16),
53481@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
53482 u8 pad[256]; // allow up to 256 byte (max) serial number
53483 } result;
53484
53485- char tmp[24 + 1];
53486-
53487 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
53488
53489 if (token < 0) {
53490@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
53491 }
53492
53493 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
53494- seq_printf(seq, "Module name : %s\n",
53495- chtostr(tmp, result.module_name, 24));
53496- seq_printf(seq, "Module revision : %s\n",
53497- chtostr(tmp, result.module_rev, 8));
53498+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
53499+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
53500
53501 seq_printf(seq, "Serial number : ");
53502 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
53503@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
53504 u8 instance_number[4];
53505 } result;
53506
53507- char tmp[64 + 1];
53508-
53509 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
53510
53511 if (token < 0) {
53512@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
53513 return 0;
53514 }
53515
53516- seq_printf(seq, "Device name : %s\n",
53517- chtostr(tmp, result.device_name, 64));
53518- seq_printf(seq, "Service name : %s\n",
53519- chtostr(tmp, result.service_name, 64));
53520- seq_printf(seq, "Physical name : %s\n",
53521- chtostr(tmp, result.physical_location, 64));
53522- seq_printf(seq, "Instance number : %s\n",
53523- chtostr(tmp, result.instance_number, 4));
53524+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
53525+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
53526+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
53527+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
53528
53529 return 0;
53530 }
53531@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
53532 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
53533 {
53534 struct i2o_device *d = (struct i2o_device *)seq->private;
53535- static u32 work32[12];
53536- static u16 *work16 = (u16 *) work32;
53537- static u8 *work8 = (u8 *) work32;
53538+ u32 work32[12];
53539+ u16 *work16 = (u16 *) work32;
53540+ u8 *work8 = (u8 *) work32;
53541 int token;
53542
53543 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
53544diff --git a/drivers/staging/i2o/iop.c b/drivers/staging/i2o/iop.c
53545index 52334fc..d7f40b3 100644
53546--- a/drivers/staging/i2o/iop.c
53547+++ b/drivers/staging/i2o/iop.c
53548@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
53549
53550 spin_lock_irqsave(&c->context_list_lock, flags);
53551
53552- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
53553- atomic_inc(&c->context_list_counter);
53554+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
53555+ atomic_inc_unchecked(&c->context_list_counter);
53556
53557- entry->context = atomic_read(&c->context_list_counter);
53558+ entry->context = atomic_read_unchecked(&c->context_list_counter);
53559
53560 list_add(&entry->list, &c->context_list);
53561
53562@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
53563
53564 #if BITS_PER_LONG == 64
53565 spin_lock_init(&c->context_list_lock);
53566- atomic_set(&c->context_list_counter, 0);
53567+ atomic_set_unchecked(&c->context_list_counter, 0);
53568 INIT_LIST_HEAD(&c->context_list);
53569 #endif
53570
53571diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
53572index 463da07..e791ce9 100644
53573--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
53574+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
53575@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
53576 return 0;
53577 }
53578
53579-sfw_test_client_ops_t brw_test_client;
53580-void brw_init_test_client(void)
53581-{
53582- brw_test_client.tso_init = brw_client_init;
53583- brw_test_client.tso_fini = brw_client_fini;
53584- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
53585- brw_test_client.tso_done_rpc = brw_client_done_rpc;
53586+sfw_test_client_ops_t brw_test_client = {
53587+ .tso_init = brw_client_init,
53588+ .tso_fini = brw_client_fini,
53589+ .tso_prep_rpc = brw_client_prep_rpc,
53590+ .tso_done_rpc = brw_client_done_rpc,
53591 };
53592
53593 srpc_service_t brw_test_service;
53594diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
53595index 5709148..ccd9e0d 100644
53596--- a/drivers/staging/lustre/lnet/selftest/framework.c
53597+++ b/drivers/staging/lustre/lnet/selftest/framework.c
53598@@ -1628,12 +1628,10 @@ static srpc_service_t sfw_services[] = {
53599
53600 extern sfw_test_client_ops_t ping_test_client;
53601 extern srpc_service_t ping_test_service;
53602-extern void ping_init_test_client(void);
53603 extern void ping_init_test_service(void);
53604
53605 extern sfw_test_client_ops_t brw_test_client;
53606 extern srpc_service_t brw_test_service;
53607-extern void brw_init_test_client(void);
53608 extern void brw_init_test_service(void);
53609
53610
53611@@ -1675,12 +1673,10 @@ sfw_startup (void)
53612 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
53613 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
53614
53615- brw_init_test_client();
53616 brw_init_test_service();
53617 rc = sfw_register_test(&brw_test_service, &brw_test_client);
53618 LASSERT (rc == 0);
53619
53620- ping_init_test_client();
53621 ping_init_test_service();
53622 rc = sfw_register_test(&ping_test_service, &ping_test_client);
53623 LASSERT (rc == 0);
53624diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
53625index d8c0df6..5041cbb 100644
53626--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
53627+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
53628@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
53629 return 0;
53630 }
53631
53632-sfw_test_client_ops_t ping_test_client;
53633-void ping_init_test_client(void)
53634-{
53635- ping_test_client.tso_init = ping_client_init;
53636- ping_test_client.tso_fini = ping_client_fini;
53637- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
53638- ping_test_client.tso_done_rpc = ping_client_done_rpc;
53639-}
53640+sfw_test_client_ops_t ping_test_client = {
53641+ .tso_init = ping_client_init,
53642+ .tso_fini = ping_client_fini,
53643+ .tso_prep_rpc = ping_client_prep_rpc,
53644+ .tso_done_rpc = ping_client_done_rpc,
53645+};
53646
53647 srpc_service_t ping_test_service;
53648 void ping_init_test_service(void)
53649diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53650index 83bc0a9..12ba00a 100644
53651--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
53652+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
53653@@ -1139,7 +1139,7 @@ struct ldlm_callback_suite {
53654 ldlm_completion_callback lcs_completion;
53655 ldlm_blocking_callback lcs_blocking;
53656 ldlm_glimpse_callback lcs_glimpse;
53657-};
53658+} __no_const;
53659
53660 /* ldlm_lockd.c */
53661 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
53662diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
53663index 2a88b80..62e7e5f 100644
53664--- a/drivers/staging/lustre/lustre/include/obd.h
53665+++ b/drivers/staging/lustre/lustre/include/obd.h
53666@@ -1362,7 +1362,7 @@ struct md_ops {
53667 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
53668 * wrapper function in include/linux/obd_class.h.
53669 */
53670-};
53671+} __no_const;
53672
53673 struct lsm_operations {
53674 void (*lsm_free)(struct lov_stripe_md *);
53675diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53676index a4c252f..b21acac 100644
53677--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53678+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
53679@@ -258,7 +258,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
53680 int added = (mode == LCK_NL);
53681 int overlaps = 0;
53682 int splitted = 0;
53683- const struct ldlm_callback_suite null_cbs = { NULL };
53684+ const struct ldlm_callback_suite null_cbs = { };
53685
53686 CDEBUG(D_DLMTRACE,
53687 "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
53688diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53689index c539e37..743b213 100644
53690--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53691+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
53692@@ -237,7 +237,7 @@ static int proc_console_max_delay_cs(struct ctl_table *table, int write,
53693 loff_t *ppos)
53694 {
53695 int rc, max_delay_cs;
53696- struct ctl_table dummy = *table;
53697+ ctl_table_no_const dummy = *table;
53698 long d;
53699
53700 dummy.data = &max_delay_cs;
53701@@ -270,7 +270,7 @@ static int proc_console_min_delay_cs(struct ctl_table *table, int write,
53702 loff_t *ppos)
53703 {
53704 int rc, min_delay_cs;
53705- struct ctl_table dummy = *table;
53706+ ctl_table_no_const dummy = *table;
53707 long d;
53708
53709 dummy.data = &min_delay_cs;
53710@@ -302,7 +302,7 @@ static int proc_console_backoff(struct ctl_table *table, int write,
53711 void __user *buffer, size_t *lenp, loff_t *ppos)
53712 {
53713 int rc, backoff;
53714- struct ctl_table dummy = *table;
53715+ ctl_table_no_const dummy = *table;
53716
53717 dummy.data = &backoff;
53718 dummy.proc_handler = &proc_dointvec;
53719diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
53720index 7dc77dd..289d03e 100644
53721--- a/drivers/staging/lustre/lustre/libcfs/module.c
53722+++ b/drivers/staging/lustre/lustre/libcfs/module.c
53723@@ -313,11 +313,11 @@ out:
53724
53725
53726 struct cfs_psdev_ops libcfs_psdev_ops = {
53727- libcfs_psdev_open,
53728- libcfs_psdev_release,
53729- NULL,
53730- NULL,
53731- libcfs_ioctl
53732+ .p_open = libcfs_psdev_open,
53733+ .p_close = libcfs_psdev_release,
53734+ .p_read = NULL,
53735+ .p_write = NULL,
53736+ .p_ioctl = libcfs_ioctl
53737 };
53738
53739 extern int insert_proc(void);
53740diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
53741index 22667db..8b703b6 100644
53742--- a/drivers/staging/octeon/ethernet-rx.c
53743+++ b/drivers/staging/octeon/ethernet-rx.c
53744@@ -354,14 +354,14 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53745 /* Increment RX stats for virtual ports */
53746 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
53747 #ifdef CONFIG_64BIT
53748- atomic64_add(1,
53749+ atomic64_add_unchecked(1,
53750 (atomic64_t *)&priv->stats.rx_packets);
53751- atomic64_add(skb->len,
53752+ atomic64_add_unchecked(skb->len,
53753 (atomic64_t *)&priv->stats.rx_bytes);
53754 #else
53755- atomic_add(1,
53756+ atomic_add_unchecked(1,
53757 (atomic_t *)&priv->stats.rx_packets);
53758- atomic_add(skb->len,
53759+ atomic_add_unchecked(skb->len,
53760 (atomic_t *)&priv->stats.rx_bytes);
53761 #endif
53762 }
53763@@ -373,10 +373,10 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
53764 dev->name);
53765 */
53766 #ifdef CONFIG_64BIT
53767- atomic64_add(1,
53768+ atomic64_add_unchecked(1,
53769 (atomic64_t *)&priv->stats.rx_dropped);
53770 #else
53771- atomic_add(1,
53772+ atomic_add_unchecked(1,
53773 (atomic_t *)&priv->stats.rx_dropped);
53774 #endif
53775 dev_kfree_skb_irq(skb);
53776diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
53777index 460e854..f926452 100644
53778--- a/drivers/staging/octeon/ethernet.c
53779+++ b/drivers/staging/octeon/ethernet.c
53780@@ -241,11 +241,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
53781 * since the RX tasklet also increments it.
53782 */
53783 #ifdef CONFIG_64BIT
53784- atomic64_add(rx_status.dropped_packets,
53785- (atomic64_t *)&priv->stats.rx_dropped);
53786+ atomic64_add_unchecked(rx_status.dropped_packets,
53787+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
53788 #else
53789- atomic_add(rx_status.dropped_packets,
53790- (atomic_t *)&priv->stats.rx_dropped);
53791+ atomic_add_unchecked(rx_status.dropped_packets,
53792+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
53793 #endif
53794 }
53795
53796diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
53797index 3b476d8..f522d68 100644
53798--- a/drivers/staging/rtl8188eu/include/hal_intf.h
53799+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
53800@@ -225,7 +225,7 @@ struct hal_ops {
53801
53802 void (*hal_notch_filter)(struct adapter *adapter, bool enable);
53803 void (*hal_reset_security_engine)(struct adapter *adapter);
53804-};
53805+} __no_const;
53806
53807 enum rt_eeprom_type {
53808 EEPROM_93C46,
53809diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
53810index 070cc03..6806e37 100644
53811--- a/drivers/staging/rtl8712/rtl871x_io.h
53812+++ b/drivers/staging/rtl8712/rtl871x_io.h
53813@@ -108,7 +108,7 @@ struct _io_ops {
53814 u8 *pmem);
53815 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
53816 u8 *pmem);
53817-};
53818+} __no_const;
53819
53820 struct io_req {
53821 struct list_head list;
53822diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
53823index 98f3ba4..c6a7fce 100644
53824--- a/drivers/staging/unisys/visorchipset/visorchipset.h
53825+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
53826@@ -171,7 +171,7 @@ struct visorchipset_busdev_notifiers {
53827 void (*device_resume)(ulong bus_no, ulong dev_no);
53828 int (*get_channel_info)(uuid_le type_uuid, ulong *min_size,
53829 ulong *max_size);
53830-};
53831+} __no_const;
53832
53833 /* These functions live inside visorchipset, and will be called to indicate
53834 * responses to specific events (by code outside of visorchipset).
53835@@ -186,7 +186,7 @@ struct visorchipset_busdev_responders {
53836 void (*device_destroy)(ulong bus_no, ulong dev_no, int response);
53837 void (*device_pause)(ulong bus_no, ulong dev_no, int response);
53838 void (*device_resume)(ulong bus_no, ulong dev_no, int response);
53839-};
53840+} __no_const;
53841
53842 /** Register functions (in the bus driver) to get called by visorchipset
53843 * whenever a bus or device appears for which this service partition is
53844diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
53845index 9512af6..045bf5a 100644
53846--- a/drivers/target/sbp/sbp_target.c
53847+++ b/drivers/target/sbp/sbp_target.c
53848@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
53849
53850 #define SESSION_MAINTENANCE_INTERVAL HZ
53851
53852-static atomic_t login_id = ATOMIC_INIT(0);
53853+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
53854
53855 static void session_maintenance_work(struct work_struct *);
53856 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
53857@@ -444,7 +444,7 @@ static void sbp_management_request_login(
53858 login->lun = se_lun;
53859 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
53860 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
53861- login->login_id = atomic_inc_return(&login_id);
53862+ login->login_id = atomic_inc_return_unchecked(&login_id);
53863
53864 login->tgt_agt = sbp_target_agent_register(login);
53865 if (IS_ERR(login->tgt_agt)) {
53866diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
53867index 7faa6ae..ae6c410 100644
53868--- a/drivers/target/target_core_device.c
53869+++ b/drivers/target/target_core_device.c
53870@@ -1495,7 +1495,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
53871 spin_lock_init(&dev->se_tmr_lock);
53872 spin_lock_init(&dev->qf_cmd_lock);
53873 sema_init(&dev->caw_sem, 1);
53874- atomic_set(&dev->dev_ordered_id, 0);
53875+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
53876 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
53877 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
53878 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
53879diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
53880index f786de0..04b643e 100644
53881--- a/drivers/target/target_core_transport.c
53882+++ b/drivers/target/target_core_transport.c
53883@@ -1168,7 +1168,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
53884 * Used to determine when ORDERED commands should go from
53885 * Dormant to Active status.
53886 */
53887- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
53888+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
53889 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
53890 cmd->se_ordered_id, cmd->sam_task_attr,
53891 dev->transport->name);
53892diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
53893index 031018e..90981a1 100644
53894--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
53895+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
53896@@ -272,8 +272,10 @@ static int int3400_thermal_probe(struct platform_device *pdev)
53897 platform_set_drvdata(pdev, priv);
53898
53899 if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
53900- int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
53901- int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
53902+ pax_open_kernel();
53903+ *(void **)&int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
53904+ *(void **)&int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
53905+ pax_close_kernel();
53906 }
53907 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
53908 priv, &int3400_thermal_ops,
53909diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
53910index 668fb1b..2737bbe 100644
53911--- a/drivers/thermal/of-thermal.c
53912+++ b/drivers/thermal/of-thermal.c
53913@@ -31,6 +31,7 @@
53914 #include <linux/export.h>
53915 #include <linux/string.h>
53916 #include <linux/thermal.h>
53917+#include <linux/mm.h>
53918
53919 #include "thermal_core.h"
53920
53921@@ -412,9 +413,11 @@ thermal_zone_of_add_sensor(struct device_node *zone,
53922 tz->ops = ops;
53923 tz->sensor_data = data;
53924
53925- tzd->ops->get_temp = of_thermal_get_temp;
53926- tzd->ops->get_trend = of_thermal_get_trend;
53927- tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
53928+ pax_open_kernel();
53929+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
53930+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
53931+ *(void **)&tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
53932+ pax_close_kernel();
53933 mutex_unlock(&tzd->lock);
53934
53935 return tzd;
53936@@ -544,9 +547,11 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
53937 return;
53938
53939 mutex_lock(&tzd->lock);
53940- tzd->ops->get_temp = NULL;
53941- tzd->ops->get_trend = NULL;
53942- tzd->ops->set_emul_temp = NULL;
53943+ pax_open_kernel();
53944+ *(void **)&tzd->ops->get_temp = NULL;
53945+ *(void **)&tzd->ops->get_trend = NULL;
53946+ *(void **)&tzd->ops->set_emul_temp = NULL;
53947+ pax_close_kernel();
53948
53949 tz->ops = NULL;
53950 tz->sensor_data = NULL;
53951diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
53952index 9ea3d9d..53e8792 100644
53953--- a/drivers/thermal/x86_pkg_temp_thermal.c
53954+++ b/drivers/thermal/x86_pkg_temp_thermal.c
53955@@ -567,7 +567,7 @@ static int pkg_temp_thermal_cpu_callback(struct notifier_block *nfb,
53956 return NOTIFY_OK;
53957 }
53958
53959-static struct notifier_block pkg_temp_thermal_notifier __refdata = {
53960+static struct notifier_block pkg_temp_thermal_notifier __refconst = {
53961 .notifier_call = pkg_temp_thermal_cpu_callback,
53962 };
53963
53964diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
53965index fd66f57..48e6376 100644
53966--- a/drivers/tty/cyclades.c
53967+++ b/drivers/tty/cyclades.c
53968@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
53969 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
53970 info->port.count);
53971 #endif
53972- info->port.count++;
53973+ atomic_inc(&info->port.count);
53974 #ifdef CY_DEBUG_COUNT
53975 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
53976- current->pid, info->port.count);
53977+ current->pid, atomic_read(&info->port.count));
53978 #endif
53979
53980 /*
53981@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
53982 for (j = 0; j < cy_card[i].nports; j++) {
53983 info = &cy_card[i].ports[j];
53984
53985- if (info->port.count) {
53986+ if (atomic_read(&info->port.count)) {
53987 /* XXX is the ldisc num worth this? */
53988 struct tty_struct *tty;
53989 struct tty_ldisc *ld;
53990diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
53991index 4fcec1d..5a036f7 100644
53992--- a/drivers/tty/hvc/hvc_console.c
53993+++ b/drivers/tty/hvc/hvc_console.c
53994@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
53995
53996 spin_lock_irqsave(&hp->port.lock, flags);
53997 /* Check and then increment for fast path open. */
53998- if (hp->port.count++ > 0) {
53999+ if (atomic_inc_return(&hp->port.count) > 1) {
54000 spin_unlock_irqrestore(&hp->port.lock, flags);
54001 hvc_kick();
54002 return 0;
54003@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
54004
54005 spin_lock_irqsave(&hp->port.lock, flags);
54006
54007- if (--hp->port.count == 0) {
54008+ if (atomic_dec_return(&hp->port.count) == 0) {
54009 spin_unlock_irqrestore(&hp->port.lock, flags);
54010 /* We are done with the tty pointer now. */
54011 tty_port_tty_set(&hp->port, NULL);
54012@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
54013 */
54014 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
54015 } else {
54016- if (hp->port.count < 0)
54017+ if (atomic_read(&hp->port.count) < 0)
54018 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
54019- hp->vtermno, hp->port.count);
54020+ hp->vtermno, atomic_read(&hp->port.count));
54021 spin_unlock_irqrestore(&hp->port.lock, flags);
54022 }
54023 }
54024@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
54025 * open->hangup case this can be called after the final close so prevent
54026 * that from happening for now.
54027 */
54028- if (hp->port.count <= 0) {
54029+ if (atomic_read(&hp->port.count) <= 0) {
54030 spin_unlock_irqrestore(&hp->port.lock, flags);
54031 return;
54032 }
54033
54034- hp->port.count = 0;
54035+ atomic_set(&hp->port.count, 0);
54036 spin_unlock_irqrestore(&hp->port.lock, flags);
54037 tty_port_tty_set(&hp->port, NULL);
54038
54039@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
54040 return -EPIPE;
54041
54042 /* FIXME what's this (unprotected) check for? */
54043- if (hp->port.count <= 0)
54044+ if (atomic_read(&hp->port.count) <= 0)
54045 return -EIO;
54046
54047 spin_lock_irqsave(&hp->lock, flags);
54048diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
54049index 81ff7e1..dfb7b71 100644
54050--- a/drivers/tty/hvc/hvcs.c
54051+++ b/drivers/tty/hvc/hvcs.c
54052@@ -83,6 +83,7 @@
54053 #include <asm/hvcserver.h>
54054 #include <asm/uaccess.h>
54055 #include <asm/vio.h>
54056+#include <asm/local.h>
54057
54058 /*
54059 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
54060@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
54061
54062 spin_lock_irqsave(&hvcsd->lock, flags);
54063
54064- if (hvcsd->port.count > 0) {
54065+ if (atomic_read(&hvcsd->port.count) > 0) {
54066 spin_unlock_irqrestore(&hvcsd->lock, flags);
54067 printk(KERN_INFO "HVCS: vterm state unchanged. "
54068 "The hvcs device node is still in use.\n");
54069@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
54070 }
54071 }
54072
54073- hvcsd->port.count = 0;
54074+ atomic_set(&hvcsd->port.count, 0);
54075 hvcsd->port.tty = tty;
54076 tty->driver_data = hvcsd;
54077
54078@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
54079 unsigned long flags;
54080
54081 spin_lock_irqsave(&hvcsd->lock, flags);
54082- hvcsd->port.count++;
54083+ atomic_inc(&hvcsd->port.count);
54084 hvcsd->todo_mask |= HVCS_SCHED_READ;
54085 spin_unlock_irqrestore(&hvcsd->lock, flags);
54086
54087@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
54088 hvcsd = tty->driver_data;
54089
54090 spin_lock_irqsave(&hvcsd->lock, flags);
54091- if (--hvcsd->port.count == 0) {
54092+ if (atomic_dec_and_test(&hvcsd->port.count)) {
54093
54094 vio_disable_interrupts(hvcsd->vdev);
54095
54096@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
54097
54098 free_irq(irq, hvcsd);
54099 return;
54100- } else if (hvcsd->port.count < 0) {
54101+ } else if (atomic_read(&hvcsd->port.count) < 0) {
54102 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
54103 " is missmanaged.\n",
54104- hvcsd->vdev->unit_address, hvcsd->port.count);
54105+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
54106 }
54107
54108 spin_unlock_irqrestore(&hvcsd->lock, flags);
54109@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
54110
54111 spin_lock_irqsave(&hvcsd->lock, flags);
54112 /* Preserve this so that we know how many kref refs to put */
54113- temp_open_count = hvcsd->port.count;
54114+ temp_open_count = atomic_read(&hvcsd->port.count);
54115
54116 /*
54117 * Don't kref put inside the spinlock because the destruction
54118@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
54119 tty->driver_data = NULL;
54120 hvcsd->port.tty = NULL;
54121
54122- hvcsd->port.count = 0;
54123+ atomic_set(&hvcsd->port.count, 0);
54124
54125 /* This will drop any buffered data on the floor which is OK in a hangup
54126 * scenario. */
54127@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
54128 * the middle of a write operation? This is a crummy place to do this
54129 * but we want to keep it all in the spinlock.
54130 */
54131- if (hvcsd->port.count <= 0) {
54132+ if (atomic_read(&hvcsd->port.count) <= 0) {
54133 spin_unlock_irqrestore(&hvcsd->lock, flags);
54134 return -ENODEV;
54135 }
54136@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
54137 {
54138 struct hvcs_struct *hvcsd = tty->driver_data;
54139
54140- if (!hvcsd || hvcsd->port.count <= 0)
54141+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
54142 return 0;
54143
54144 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
54145diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
54146index 4190199..06d5bfa 100644
54147--- a/drivers/tty/hvc/hvsi.c
54148+++ b/drivers/tty/hvc/hvsi.c
54149@@ -85,7 +85,7 @@ struct hvsi_struct {
54150 int n_outbuf;
54151 uint32_t vtermno;
54152 uint32_t virq;
54153- atomic_t seqno; /* HVSI packet sequence number */
54154+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
54155 uint16_t mctrl;
54156 uint8_t state; /* HVSI protocol state */
54157 uint8_t flags;
54158@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
54159
54160 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
54161 packet.hdr.len = sizeof(struct hvsi_query_response);
54162- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54163+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54164 packet.verb = VSV_SEND_VERSION_NUMBER;
54165 packet.u.version = HVSI_VERSION;
54166 packet.query_seqno = query_seqno+1;
54167@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
54168
54169 packet.hdr.type = VS_QUERY_PACKET_HEADER;
54170 packet.hdr.len = sizeof(struct hvsi_query);
54171- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54172+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54173 packet.verb = verb;
54174
54175 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
54176@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
54177 int wrote;
54178
54179 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
54180- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54181+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54182 packet.hdr.len = sizeof(struct hvsi_control);
54183 packet.verb = VSV_SET_MODEM_CTL;
54184 packet.mask = HVSI_TSDTR;
54185@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
54186 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
54187
54188 packet.hdr.type = VS_DATA_PACKET_HEADER;
54189- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54190+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54191 packet.hdr.len = count + sizeof(struct hvsi_header);
54192 memcpy(&packet.data, buf, count);
54193
54194@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
54195 struct hvsi_control packet __ALIGNED__;
54196
54197 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
54198- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
54199+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
54200 packet.hdr.len = 6;
54201 packet.verb = VSV_CLOSE_PROTOCOL;
54202
54203@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
54204
54205 tty_port_tty_set(&hp->port, tty);
54206 spin_lock_irqsave(&hp->lock, flags);
54207- hp->port.count++;
54208+ atomic_inc(&hp->port.count);
54209 atomic_set(&hp->seqno, 0);
54210 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
54211 spin_unlock_irqrestore(&hp->lock, flags);
54212@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
54213
54214 spin_lock_irqsave(&hp->lock, flags);
54215
54216- if (--hp->port.count == 0) {
54217+ if (atomic_dec_return(&hp->port.count) == 0) {
54218 tty_port_tty_set(&hp->port, NULL);
54219 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
54220
54221@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
54222
54223 spin_lock_irqsave(&hp->lock, flags);
54224 }
54225- } else if (hp->port.count < 0)
54226+ } else if (atomic_read(&hp->port.count) < 0)
54227 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
54228- hp - hvsi_ports, hp->port.count);
54229+ hp - hvsi_ports, atomic_read(&hp->port.count));
54230
54231 spin_unlock_irqrestore(&hp->lock, flags);
54232 }
54233@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
54234 tty_port_tty_set(&hp->port, NULL);
54235
54236 spin_lock_irqsave(&hp->lock, flags);
54237- hp->port.count = 0;
54238+ atomic_set(&hp->port.count, 0);
54239 hp->n_outbuf = 0;
54240 spin_unlock_irqrestore(&hp->lock, flags);
54241 }
54242diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
54243index a270f04..7c77b5d 100644
54244--- a/drivers/tty/hvc/hvsi_lib.c
54245+++ b/drivers/tty/hvc/hvsi_lib.c
54246@@ -8,7 +8,7 @@
54247
54248 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
54249 {
54250- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
54251+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
54252
54253 /* Assumes that always succeeds, works in practice */
54254 return pv->put_chars(pv->termno, (char *)packet, packet->len);
54255@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
54256
54257 /* Reset state */
54258 pv->established = 0;
54259- atomic_set(&pv->seqno, 0);
54260+ atomic_set_unchecked(&pv->seqno, 0);
54261
54262 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
54263
54264diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
54265index 345cebb..d5a1e9e 100644
54266--- a/drivers/tty/ipwireless/tty.c
54267+++ b/drivers/tty/ipwireless/tty.c
54268@@ -28,6 +28,7 @@
54269 #include <linux/tty_driver.h>
54270 #include <linux/tty_flip.h>
54271 #include <linux/uaccess.h>
54272+#include <asm/local.h>
54273
54274 #include "tty.h"
54275 #include "network.h"
54276@@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
54277 return -ENODEV;
54278
54279 mutex_lock(&tty->ipw_tty_mutex);
54280- if (tty->port.count == 0)
54281+ if (atomic_read(&tty->port.count) == 0)
54282 tty->tx_bytes_queued = 0;
54283
54284- tty->port.count++;
54285+ atomic_inc(&tty->port.count);
54286
54287 tty->port.tty = linux_tty;
54288 linux_tty->driver_data = tty;
54289@@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
54290
54291 static void do_ipw_close(struct ipw_tty *tty)
54292 {
54293- tty->port.count--;
54294-
54295- if (tty->port.count == 0) {
54296+ if (atomic_dec_return(&tty->port.count) == 0) {
54297 struct tty_struct *linux_tty = tty->port.tty;
54298
54299 if (linux_tty != NULL) {
54300@@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
54301 return;
54302
54303 mutex_lock(&tty->ipw_tty_mutex);
54304- if (tty->port.count == 0) {
54305+ if (atomic_read(&tty->port.count) == 0) {
54306 mutex_unlock(&tty->ipw_tty_mutex);
54307 return;
54308 }
54309@@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
54310
54311 mutex_lock(&tty->ipw_tty_mutex);
54312
54313- if (!tty->port.count) {
54314+ if (!atomic_read(&tty->port.count)) {
54315 mutex_unlock(&tty->ipw_tty_mutex);
54316 return;
54317 }
54318@@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty,
54319 return -ENODEV;
54320
54321 mutex_lock(&tty->ipw_tty_mutex);
54322- if (!tty->port.count) {
54323+ if (!atomic_read(&tty->port.count)) {
54324 mutex_unlock(&tty->ipw_tty_mutex);
54325 return -EINVAL;
54326 }
54327@@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
54328 if (!tty)
54329 return -ENODEV;
54330
54331- if (!tty->port.count)
54332+ if (!atomic_read(&tty->port.count))
54333 return -EINVAL;
54334
54335 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
54336@@ -279,7 +278,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
54337 if (!tty)
54338 return 0;
54339
54340- if (!tty->port.count)
54341+ if (!atomic_read(&tty->port.count))
54342 return 0;
54343
54344 return tty->tx_bytes_queued;
54345@@ -360,7 +359,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
54346 if (!tty)
54347 return -ENODEV;
54348
54349- if (!tty->port.count)
54350+ if (!atomic_read(&tty->port.count))
54351 return -EINVAL;
54352
54353 return get_control_lines(tty);
54354@@ -376,7 +375,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
54355 if (!tty)
54356 return -ENODEV;
54357
54358- if (!tty->port.count)
54359+ if (!atomic_read(&tty->port.count))
54360 return -EINVAL;
54361
54362 return set_control_lines(tty, set, clear);
54363@@ -390,7 +389,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
54364 if (!tty)
54365 return -ENODEV;
54366
54367- if (!tty->port.count)
54368+ if (!atomic_read(&tty->port.count))
54369 return -EINVAL;
54370
54371 /* FIXME: Exactly how is the tty object locked here .. */
54372@@ -546,7 +545,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
54373 * are gone */
54374 mutex_lock(&ttyj->ipw_tty_mutex);
54375 }
54376- while (ttyj->port.count)
54377+ while (atomic_read(&ttyj->port.count))
54378 do_ipw_close(ttyj);
54379 ipwireless_disassociate_network_ttys(network,
54380 ttyj->channel_idx);
54381diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
54382index 14c54e0..1efd4f2 100644
54383--- a/drivers/tty/moxa.c
54384+++ b/drivers/tty/moxa.c
54385@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
54386 }
54387
54388 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
54389- ch->port.count++;
54390+ atomic_inc(&ch->port.count);
54391 tty->driver_data = ch;
54392 tty_port_tty_set(&ch->port, tty);
54393 mutex_lock(&ch->port.mutex);
54394diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
54395index bce16e4..1120a85 100644
54396--- a/drivers/tty/n_gsm.c
54397+++ b/drivers/tty/n_gsm.c
54398@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
54399 spin_lock_init(&dlci->lock);
54400 mutex_init(&dlci->mutex);
54401 dlci->fifo = &dlci->_fifo;
54402- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
54403+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
54404 kfree(dlci);
54405 return NULL;
54406 }
54407@@ -2958,7 +2958,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
54408 struct gsm_dlci *dlci = tty->driver_data;
54409 struct tty_port *port = &dlci->port;
54410
54411- port->count++;
54412+ atomic_inc(&port->count);
54413 tty_port_tty_set(port, tty);
54414
54415 dlci->modem_rx = 0;
54416diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
54417index eee40b5..796fb03 100644
54418--- a/drivers/tty/n_tty.c
54419+++ b/drivers/tty/n_tty.c
54420@@ -116,7 +116,7 @@ struct n_tty_data {
54421 int minimum_to_wake;
54422
54423 /* consumer-published */
54424- size_t read_tail;
54425+ size_t read_tail __intentional_overflow(-1);
54426 size_t line_start;
54427
54428 /* protected by output lock */
54429@@ -2572,6 +2572,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
54430 {
54431 *ops = tty_ldisc_N_TTY;
54432 ops->owner = NULL;
54433- ops->refcount = ops->flags = 0;
54434+ atomic_set(&ops->refcount, 0);
54435+ ops->flags = 0;
54436 }
54437 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
54438diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
54439index 4d5e840..a2340a6 100644
54440--- a/drivers/tty/pty.c
54441+++ b/drivers/tty/pty.c
54442@@ -849,8 +849,10 @@ static void __init unix98_pty_init(void)
54443 panic("Couldn't register Unix98 pts driver");
54444
54445 /* Now create the /dev/ptmx special device */
54446+ pax_open_kernel();
54447 tty_default_fops(&ptmx_fops);
54448- ptmx_fops.open = ptmx_open;
54449+ *(void **)&ptmx_fops.open = ptmx_open;
54450+ pax_close_kernel();
54451
54452 cdev_init(&ptmx_cdev, &ptmx_fops);
54453 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
54454diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
54455index c8dd8dc..dca6cfd 100644
54456--- a/drivers/tty/rocket.c
54457+++ b/drivers/tty/rocket.c
54458@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
54459 tty->driver_data = info;
54460 tty_port_tty_set(port, tty);
54461
54462- if (port->count++ == 0) {
54463+ if (atomic_inc_return(&port->count) == 1) {
54464 atomic_inc(&rp_num_ports_open);
54465
54466 #ifdef ROCKET_DEBUG_OPEN
54467@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
54468 #endif
54469 }
54470 #ifdef ROCKET_DEBUG_OPEN
54471- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
54472+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic-read(&info->port.count));
54473 #endif
54474
54475 /*
54476@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
54477 spin_unlock_irqrestore(&info->port.lock, flags);
54478 return;
54479 }
54480- if (info->port.count)
54481+ if (atomic_read(&info->port.count))
54482 atomic_dec(&rp_num_ports_open);
54483 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
54484 spin_unlock_irqrestore(&info->port.lock, flags);
54485diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
54486index aa28209..e08fb85 100644
54487--- a/drivers/tty/serial/ioc4_serial.c
54488+++ b/drivers/tty/serial/ioc4_serial.c
54489@@ -437,7 +437,7 @@ struct ioc4_soft {
54490 } is_intr_info[MAX_IOC4_INTR_ENTS];
54491
54492 /* Number of entries active in the above array */
54493- atomic_t is_num_intrs;
54494+ atomic_unchecked_t is_num_intrs;
54495 } is_intr_type[IOC4_NUM_INTR_TYPES];
54496
54497 /* is_ir_lock must be held while
54498@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
54499 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
54500 || (type == IOC4_OTHER_INTR_TYPE)));
54501
54502- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
54503+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
54504 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
54505
54506 /* Save off the lower level interrupt handler */
54507@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
54508
54509 soft = arg;
54510 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
54511- num_intrs = (int)atomic_read(
54512+ num_intrs = (int)atomic_read_unchecked(
54513 &soft->is_intr_type[intr_type].is_num_intrs);
54514
54515 this_mir = this_ir = pending_intrs(soft, intr_type);
54516diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
54517index 129dc5b..1da5bb8 100644
54518--- a/drivers/tty/serial/kgdb_nmi.c
54519+++ b/drivers/tty/serial/kgdb_nmi.c
54520@@ -53,7 +53,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
54521 * I/O utilities that messages sent to the console will automatically
54522 * be displayed on the dbg_io.
54523 */
54524- dbg_io_ops->is_console = true;
54525+ pax_open_kernel();
54526+ *(int *)&dbg_io_ops->is_console = true;
54527+ pax_close_kernel();
54528
54529 return 0;
54530 }
54531diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
54532index a260cde..6b2b5ce 100644
54533--- a/drivers/tty/serial/kgdboc.c
54534+++ b/drivers/tty/serial/kgdboc.c
54535@@ -24,8 +24,9 @@
54536 #define MAX_CONFIG_LEN 40
54537
54538 static struct kgdb_io kgdboc_io_ops;
54539+static struct kgdb_io kgdboc_io_ops_console;
54540
54541-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
54542+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
54543 static int configured = -1;
54544
54545 static char config[MAX_CONFIG_LEN];
54546@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
54547 kgdboc_unregister_kbd();
54548 if (configured == 1)
54549 kgdb_unregister_io_module(&kgdboc_io_ops);
54550+ else if (configured == 2)
54551+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
54552 }
54553
54554 static int configure_kgdboc(void)
54555@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
54556 int err;
54557 char *cptr = config;
54558 struct console *cons;
54559+ int is_console = 0;
54560
54561 err = kgdboc_option_setup(config);
54562 if (err || !strlen(config) || isspace(config[0]))
54563 goto noconfig;
54564
54565 err = -ENODEV;
54566- kgdboc_io_ops.is_console = 0;
54567 kgdb_tty_driver = NULL;
54568
54569 kgdboc_use_kms = 0;
54570@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
54571 int idx;
54572 if (cons->device && cons->device(cons, &idx) == p &&
54573 idx == tty_line) {
54574- kgdboc_io_ops.is_console = 1;
54575+ is_console = 1;
54576 break;
54577 }
54578 cons = cons->next;
54579@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
54580 kgdb_tty_line = tty_line;
54581
54582 do_register:
54583- err = kgdb_register_io_module(&kgdboc_io_ops);
54584+ if (is_console) {
54585+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
54586+ configured = 2;
54587+ } else {
54588+ err = kgdb_register_io_module(&kgdboc_io_ops);
54589+ configured = 1;
54590+ }
54591 if (err)
54592 goto noconfig;
54593
54594@@ -205,8 +214,6 @@ do_register:
54595 if (err)
54596 goto nmi_con_failed;
54597
54598- configured = 1;
54599-
54600 return 0;
54601
54602 nmi_con_failed:
54603@@ -223,7 +230,7 @@ noconfig:
54604 static int __init init_kgdboc(void)
54605 {
54606 /* Already configured? */
54607- if (configured == 1)
54608+ if (configured >= 1)
54609 return 0;
54610
54611 return configure_kgdboc();
54612@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
54613 if (config[len - 1] == '\n')
54614 config[len - 1] = '\0';
54615
54616- if (configured == 1)
54617+ if (configured >= 1)
54618 cleanup_kgdboc();
54619
54620 /* Go and configure with the new params. */
54621@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
54622 .post_exception = kgdboc_post_exp_handler,
54623 };
54624
54625+static struct kgdb_io kgdboc_io_ops_console = {
54626+ .name = "kgdboc",
54627+ .read_char = kgdboc_get_char,
54628+ .write_char = kgdboc_put_char,
54629+ .pre_exception = kgdboc_pre_exp_handler,
54630+ .post_exception = kgdboc_post_exp_handler,
54631+ .is_console = 1
54632+};
54633+
54634 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
54635 /* This is only available if kgdboc is a built in for early debugging */
54636 static int __init kgdboc_early_init(char *opt)
54637diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
54638index b73889c..9f74f0a 100644
54639--- a/drivers/tty/serial/msm_serial.c
54640+++ b/drivers/tty/serial/msm_serial.c
54641@@ -1012,7 +1012,7 @@ static struct uart_driver msm_uart_driver = {
54642 .cons = MSM_CONSOLE,
54643 };
54644
54645-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
54646+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
54647
54648 static const struct of_device_id msm_uartdm_table[] = {
54649 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
54650@@ -1036,7 +1036,7 @@ static int msm_serial_probe(struct platform_device *pdev)
54651 line = pdev->id;
54652
54653 if (line < 0)
54654- line = atomic_inc_return(&msm_uart_next_id) - 1;
54655+ line = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
54656
54657 if (unlikely(line < 0 || line >= UART_NR))
54658 return -ENXIO;
54659diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
54660index cf08876..711e0bf 100644
54661--- a/drivers/tty/serial/samsung.c
54662+++ b/drivers/tty/serial/samsung.c
54663@@ -987,11 +987,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
54664 ourport->tx_in_progress = 0;
54665 }
54666
54667+static int s3c64xx_serial_startup(struct uart_port *port);
54668 static int s3c24xx_serial_startup(struct uart_port *port)
54669 {
54670 struct s3c24xx_uart_port *ourport = to_ourport(port);
54671 int ret;
54672
54673+ /* Startup sequence is different for s3c64xx and higher SoC's */
54674+ if (s3c24xx_serial_has_interrupt_mask(port))
54675+ return s3c64xx_serial_startup(port);
54676+
54677 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
54678 port, (unsigned long long)port->mapbase, port->membase);
54679
54680@@ -1697,10 +1702,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
54681 /* setup info for port */
54682 port->dev = &platdev->dev;
54683
54684- /* Startup sequence is different for s3c64xx and higher SoC's */
54685- if (s3c24xx_serial_has_interrupt_mask(port))
54686- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
54687-
54688 port->uartclk = 1;
54689
54690 if (cfg->uart_flags & UPF_CONS_FLOW) {
54691diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
54692index 6a1055a..5ca9ad9 100644
54693--- a/drivers/tty/serial/serial_core.c
54694+++ b/drivers/tty/serial/serial_core.c
54695@@ -1377,7 +1377,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
54696 state = drv->state + tty->index;
54697 port = &state->port;
54698 spin_lock_irq(&port->lock);
54699- --port->count;
54700+ atomic_dec(&port->count);
54701 spin_unlock_irq(&port->lock);
54702 return;
54703 }
54704@@ -1387,7 +1387,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
54705
54706 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
54707
54708- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
54709+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
54710 return;
54711
54712 /*
54713@@ -1511,7 +1511,7 @@ static void uart_hangup(struct tty_struct *tty)
54714 uart_flush_buffer(tty);
54715 uart_shutdown(tty, state);
54716 spin_lock_irqsave(&port->lock, flags);
54717- port->count = 0;
54718+ atomic_set(&port->count, 0);
54719 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
54720 spin_unlock_irqrestore(&port->lock, flags);
54721 tty_port_tty_set(port, NULL);
54722@@ -1598,7 +1598,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
54723 pr_debug("uart_open(%d) called\n", line);
54724
54725 spin_lock_irq(&port->lock);
54726- ++port->count;
54727+ atomic_inc(&port->count);
54728 spin_unlock_irq(&port->lock);
54729
54730 /*
54731diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
54732index b799170..87dafd5 100644
54733--- a/drivers/tty/synclink.c
54734+++ b/drivers/tty/synclink.c
54735@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54736
54737 if (debug_level >= DEBUG_LEVEL_INFO)
54738 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
54739- __FILE__,__LINE__, info->device_name, info->port.count);
54740+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
54741
54742 if (tty_port_close_start(&info->port, tty, filp) == 0)
54743 goto cleanup;
54744@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
54745 cleanup:
54746 if (debug_level >= DEBUG_LEVEL_INFO)
54747 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
54748- tty->driver->name, info->port.count);
54749+ tty->driver->name, atomic_read(&info->port.count));
54750
54751 } /* end of mgsl_close() */
54752
54753@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
54754
54755 mgsl_flush_buffer(tty);
54756 shutdown(info);
54757-
54758- info->port.count = 0;
54759+
54760+ atomic_set(&info->port.count, 0);
54761 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54762 info->port.tty = NULL;
54763
54764@@ -3296,10 +3296,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54765
54766 if (debug_level >= DEBUG_LEVEL_INFO)
54767 printk("%s(%d):block_til_ready before block on %s count=%d\n",
54768- __FILE__,__LINE__, tty->driver->name, port->count );
54769+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54770
54771 spin_lock_irqsave(&info->irq_spinlock, flags);
54772- port->count--;
54773+ atomic_dec(&port->count);
54774 spin_unlock_irqrestore(&info->irq_spinlock, flags);
54775 port->blocked_open++;
54776
54777@@ -3327,7 +3327,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54778
54779 if (debug_level >= DEBUG_LEVEL_INFO)
54780 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
54781- __FILE__,__LINE__, tty->driver->name, port->count );
54782+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54783
54784 tty_unlock(tty);
54785 schedule();
54786@@ -3339,12 +3339,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
54787
54788 /* FIXME: Racy on hangup during close wait */
54789 if (!tty_hung_up_p(filp))
54790- port->count++;
54791+ atomic_inc(&port->count);
54792 port->blocked_open--;
54793
54794 if (debug_level >= DEBUG_LEVEL_INFO)
54795 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
54796- __FILE__,__LINE__, tty->driver->name, port->count );
54797+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
54798
54799 if (!retval)
54800 port->flags |= ASYNC_NORMAL_ACTIVE;
54801@@ -3396,7 +3396,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54802
54803 if (debug_level >= DEBUG_LEVEL_INFO)
54804 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
54805- __FILE__,__LINE__,tty->driver->name, info->port.count);
54806+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
54807
54808 /* If port is closing, signal caller to try again */
54809 if (info->port.flags & ASYNC_CLOSING){
54810@@ -3415,10 +3415,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
54811 spin_unlock_irqrestore(&info->netlock, flags);
54812 goto cleanup;
54813 }
54814- info->port.count++;
54815+ atomic_inc(&info->port.count);
54816 spin_unlock_irqrestore(&info->netlock, flags);
54817
54818- if (info->port.count == 1) {
54819+ if (atomic_read(&info->port.count) == 1) {
54820 /* 1st open on this device, init hardware */
54821 retval = startup(info);
54822 if (retval < 0)
54823@@ -3442,8 +3442,8 @@ cleanup:
54824 if (retval) {
54825 if (tty->count == 1)
54826 info->port.tty = NULL; /* tty layer will release tty struct */
54827- if(info->port.count)
54828- info->port.count--;
54829+ if (atomic_read(&info->port.count))
54830+ atomic_dec(&info->port.count);
54831 }
54832
54833 return retval;
54834@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54835 unsigned short new_crctype;
54836
54837 /* return error if TTY interface open */
54838- if (info->port.count)
54839+ if (atomic_read(&info->port.count))
54840 return -EBUSY;
54841
54842 switch (encoding)
54843@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
54844
54845 /* arbitrate between network and tty opens */
54846 spin_lock_irqsave(&info->netlock, flags);
54847- if (info->port.count != 0 || info->netcount != 0) {
54848+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54849 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
54850 spin_unlock_irqrestore(&info->netlock, flags);
54851 return -EBUSY;
54852@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54853 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
54854
54855 /* return error if TTY interface open */
54856- if (info->port.count)
54857+ if (atomic_read(&info->port.count))
54858 return -EBUSY;
54859
54860 if (cmd != SIOCWANDEV)
54861diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
54862index 0e8c39b..e0cb171 100644
54863--- a/drivers/tty/synclink_gt.c
54864+++ b/drivers/tty/synclink_gt.c
54865@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
54866 tty->driver_data = info;
54867 info->port.tty = tty;
54868
54869- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
54870+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
54871
54872 /* If port is closing, signal caller to try again */
54873 if (info->port.flags & ASYNC_CLOSING){
54874@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
54875 mutex_unlock(&info->port.mutex);
54876 goto cleanup;
54877 }
54878- info->port.count++;
54879+ atomic_inc(&info->port.count);
54880 spin_unlock_irqrestore(&info->netlock, flags);
54881
54882- if (info->port.count == 1) {
54883+ if (atomic_read(&info->port.count) == 1) {
54884 /* 1st open on this device, init hardware */
54885 retval = startup(info);
54886 if (retval < 0) {
54887@@ -715,8 +715,8 @@ cleanup:
54888 if (retval) {
54889 if (tty->count == 1)
54890 info->port.tty = NULL; /* tty layer will release tty struct */
54891- if(info->port.count)
54892- info->port.count--;
54893+ if(atomic_read(&info->port.count))
54894+ atomic_dec(&info->port.count);
54895 }
54896
54897 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
54898@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54899
54900 if (sanity_check(info, tty->name, "close"))
54901 return;
54902- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
54903+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
54904
54905 if (tty_port_close_start(&info->port, tty, filp) == 0)
54906 goto cleanup;
54907@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
54908 tty_port_close_end(&info->port, tty);
54909 info->port.tty = NULL;
54910 cleanup:
54911- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
54912+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
54913 }
54914
54915 static void hangup(struct tty_struct *tty)
54916@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
54917 shutdown(info);
54918
54919 spin_lock_irqsave(&info->port.lock, flags);
54920- info->port.count = 0;
54921+ atomic_set(&info->port.count, 0);
54922 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
54923 info->port.tty = NULL;
54924 spin_unlock_irqrestore(&info->port.lock, flags);
54925@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
54926 unsigned short new_crctype;
54927
54928 /* return error if TTY interface open */
54929- if (info->port.count)
54930+ if (atomic_read(&info->port.count))
54931 return -EBUSY;
54932
54933 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
54934@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
54935
54936 /* arbitrate between network and tty opens */
54937 spin_lock_irqsave(&info->netlock, flags);
54938- if (info->port.count != 0 || info->netcount != 0) {
54939+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
54940 DBGINFO(("%s hdlc_open busy\n", dev->name));
54941 spin_unlock_irqrestore(&info->netlock, flags);
54942 return -EBUSY;
54943@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
54944 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
54945
54946 /* return error if TTY interface open */
54947- if (info->port.count)
54948+ if (atomic_read(&info->port.count))
54949 return -EBUSY;
54950
54951 if (cmd != SIOCWANDEV)
54952@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
54953 if (port == NULL)
54954 continue;
54955 spin_lock(&port->lock);
54956- if ((port->port.count || port->netcount) &&
54957+ if ((atomic_read(&port->port.count) || port->netcount) &&
54958 port->pending_bh && !port->bh_running &&
54959 !port->bh_requested) {
54960 DBGISR(("%s bh queued\n", port->device_name));
54961@@ -3299,7 +3299,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54962 add_wait_queue(&port->open_wait, &wait);
54963
54964 spin_lock_irqsave(&info->lock, flags);
54965- port->count--;
54966+ atomic_dec(&port->count);
54967 spin_unlock_irqrestore(&info->lock, flags);
54968 port->blocked_open++;
54969
54970@@ -3335,7 +3335,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
54971 remove_wait_queue(&port->open_wait, &wait);
54972
54973 if (!tty_hung_up_p(filp))
54974- port->count++;
54975+ atomic_inc(&port->count);
54976 port->blocked_open--;
54977
54978 if (!retval)
54979diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
54980index c3f9091..abe4601 100644
54981--- a/drivers/tty/synclinkmp.c
54982+++ b/drivers/tty/synclinkmp.c
54983@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
54984
54985 if (debug_level >= DEBUG_LEVEL_INFO)
54986 printk("%s(%d):%s open(), old ref count = %d\n",
54987- __FILE__,__LINE__,tty->driver->name, info->port.count);
54988+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
54989
54990 /* If port is closing, signal caller to try again */
54991 if (info->port.flags & ASYNC_CLOSING){
54992@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
54993 spin_unlock_irqrestore(&info->netlock, flags);
54994 goto cleanup;
54995 }
54996- info->port.count++;
54997+ atomic_inc(&info->port.count);
54998 spin_unlock_irqrestore(&info->netlock, flags);
54999
55000- if (info->port.count == 1) {
55001+ if (atomic_read(&info->port.count) == 1) {
55002 /* 1st open on this device, init hardware */
55003 retval = startup(info);
55004 if (retval < 0)
55005@@ -796,8 +796,8 @@ cleanup:
55006 if (retval) {
55007 if (tty->count == 1)
55008 info->port.tty = NULL; /* tty layer will release tty struct */
55009- if(info->port.count)
55010- info->port.count--;
55011+ if(atomic_read(&info->port.count))
55012+ atomic_dec(&info->port.count);
55013 }
55014
55015 return retval;
55016@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
55017
55018 if (debug_level >= DEBUG_LEVEL_INFO)
55019 printk("%s(%d):%s close() entry, count=%d\n",
55020- __FILE__,__LINE__, info->device_name, info->port.count);
55021+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
55022
55023 if (tty_port_close_start(&info->port, tty, filp) == 0)
55024 goto cleanup;
55025@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
55026 cleanup:
55027 if (debug_level >= DEBUG_LEVEL_INFO)
55028 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
55029- tty->driver->name, info->port.count);
55030+ tty->driver->name, atomic_read(&info->port.count));
55031 }
55032
55033 /* Called by tty_hangup() when a hangup is signaled.
55034@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
55035 shutdown(info);
55036
55037 spin_lock_irqsave(&info->port.lock, flags);
55038- info->port.count = 0;
55039+ atomic_set(&info->port.count, 0);
55040 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
55041 info->port.tty = NULL;
55042 spin_unlock_irqrestore(&info->port.lock, flags);
55043@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
55044 unsigned short new_crctype;
55045
55046 /* return error if TTY interface open */
55047- if (info->port.count)
55048+ if (atomic_read(&info->port.count))
55049 return -EBUSY;
55050
55051 switch (encoding)
55052@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
55053
55054 /* arbitrate between network and tty opens */
55055 spin_lock_irqsave(&info->netlock, flags);
55056- if (info->port.count != 0 || info->netcount != 0) {
55057+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
55058 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
55059 spin_unlock_irqrestore(&info->netlock, flags);
55060 return -EBUSY;
55061@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
55062 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
55063
55064 /* return error if TTY interface open */
55065- if (info->port.count)
55066+ if (atomic_read(&info->port.count))
55067 return -EBUSY;
55068
55069 if (cmd != SIOCWANDEV)
55070@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
55071 * do not request bottom half processing if the
55072 * device is not open in a normal mode.
55073 */
55074- if ( port && (port->port.count || port->netcount) &&
55075+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
55076 port->pending_bh && !port->bh_running &&
55077 !port->bh_requested ) {
55078 if ( debug_level >= DEBUG_LEVEL_ISR )
55079@@ -3318,10 +3318,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
55080
55081 if (debug_level >= DEBUG_LEVEL_INFO)
55082 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
55083- __FILE__,__LINE__, tty->driver->name, port->count );
55084+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
55085
55086 spin_lock_irqsave(&info->lock, flags);
55087- port->count--;
55088+ atomic_dec(&port->count);
55089 spin_unlock_irqrestore(&info->lock, flags);
55090 port->blocked_open++;
55091
55092@@ -3349,7 +3349,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
55093
55094 if (debug_level >= DEBUG_LEVEL_INFO)
55095 printk("%s(%d):%s block_til_ready() count=%d\n",
55096- __FILE__,__LINE__, tty->driver->name, port->count );
55097+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
55098
55099 tty_unlock(tty);
55100 schedule();
55101@@ -3359,12 +3359,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
55102 set_current_state(TASK_RUNNING);
55103 remove_wait_queue(&port->open_wait, &wait);
55104 if (!tty_hung_up_p(filp))
55105- port->count++;
55106+ atomic_inc(&port->count);
55107 port->blocked_open--;
55108
55109 if (debug_level >= DEBUG_LEVEL_INFO)
55110 printk("%s(%d):%s block_til_ready() after, count=%d\n",
55111- __FILE__,__LINE__, tty->driver->name, port->count );
55112+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
55113
55114 if (!retval)
55115 port->flags |= ASYNC_NORMAL_ACTIVE;
55116diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
55117index 259a4d5..9b0c9e7 100644
55118--- a/drivers/tty/sysrq.c
55119+++ b/drivers/tty/sysrq.c
55120@@ -1085,7 +1085,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
55121 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
55122 size_t count, loff_t *ppos)
55123 {
55124- if (count) {
55125+ if (count && capable(CAP_SYS_ADMIN)) {
55126 char c;
55127
55128 if (get_user(c, buf))
55129diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
55130index 2bb4dfc..a7f6e86 100644
55131--- a/drivers/tty/tty_io.c
55132+++ b/drivers/tty/tty_io.c
55133@@ -3503,7 +3503,7 @@ EXPORT_SYMBOL(tty_devnum);
55134
55135 void tty_default_fops(struct file_operations *fops)
55136 {
55137- *fops = tty_fops;
55138+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
55139 }
55140
55141 /*
55142diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
55143index 3737f55..7cef448 100644
55144--- a/drivers/tty/tty_ldisc.c
55145+++ b/drivers/tty/tty_ldisc.c
55146@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
55147 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
55148 tty_ldiscs[disc] = new_ldisc;
55149 new_ldisc->num = disc;
55150- new_ldisc->refcount = 0;
55151+ atomic_set(&new_ldisc->refcount, 0);
55152 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
55153
55154 return ret;
55155@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
55156 return -EINVAL;
55157
55158 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
55159- if (tty_ldiscs[disc]->refcount)
55160+ if (atomic_read(&tty_ldiscs[disc]->refcount))
55161 ret = -EBUSY;
55162 else
55163 tty_ldiscs[disc] = NULL;
55164@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
55165 if (ldops) {
55166 ret = ERR_PTR(-EAGAIN);
55167 if (try_module_get(ldops->owner)) {
55168- ldops->refcount++;
55169+ atomic_inc(&ldops->refcount);
55170 ret = ldops;
55171 }
55172 }
55173@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
55174 unsigned long flags;
55175
55176 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
55177- ldops->refcount--;
55178+ atomic_dec(&ldops->refcount);
55179 module_put(ldops->owner);
55180 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
55181 }
55182diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
55183index 40b31835..94d92ae 100644
55184--- a/drivers/tty/tty_port.c
55185+++ b/drivers/tty/tty_port.c
55186@@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port)
55187 unsigned long flags;
55188
55189 spin_lock_irqsave(&port->lock, flags);
55190- port->count = 0;
55191+ atomic_set(&port->count, 0);
55192 port->flags &= ~ASYNC_NORMAL_ACTIVE;
55193 tty = port->tty;
55194 if (tty)
55195@@ -398,7 +398,7 @@ int tty_port_block_til_ready(struct tty_port *port,
55196
55197 /* The port lock protects the port counts */
55198 spin_lock_irqsave(&port->lock, flags);
55199- port->count--;
55200+ atomic_dec(&port->count);
55201 port->blocked_open++;
55202 spin_unlock_irqrestore(&port->lock, flags);
55203
55204@@ -440,7 +440,7 @@ int tty_port_block_til_ready(struct tty_port *port,
55205 we must not mess that up further */
55206 spin_lock_irqsave(&port->lock, flags);
55207 if (!tty_hung_up_p(filp))
55208- port->count++;
55209+ atomic_inc(&port->count);
55210 port->blocked_open--;
55211 if (retval == 0)
55212 port->flags |= ASYNC_NORMAL_ACTIVE;
55213@@ -476,19 +476,19 @@ int tty_port_close_start(struct tty_port *port,
55214 return 0;
55215
55216 spin_lock_irqsave(&port->lock, flags);
55217- if (tty->count == 1 && port->count != 1) {
55218+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
55219 printk(KERN_WARNING
55220 "tty_port_close_start: tty->count = 1 port count = %d.\n",
55221- port->count);
55222- port->count = 1;
55223+ atomic_read(&port->count));
55224+ atomic_set(&port->count, 1);
55225 }
55226- if (--port->count < 0) {
55227+ if (atomic_dec_return(&port->count) < 0) {
55228 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
55229- port->count);
55230- port->count = 0;
55231+ atomic_read(&port->count));
55232+ atomic_set(&port->count, 0);
55233 }
55234
55235- if (port->count) {
55236+ if (atomic_read(&port->count)) {
55237 spin_unlock_irqrestore(&port->lock, flags);
55238 return 0;
55239 }
55240@@ -590,7 +590,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
55241 struct file *filp)
55242 {
55243 spin_lock_irq(&port->lock);
55244- ++port->count;
55245+ atomic_inc(&port->count);
55246 spin_unlock_irq(&port->lock);
55247 tty_port_tty_set(port, tty);
55248
55249diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
55250index 8a89f6e..50b32af 100644
55251--- a/drivers/tty/vt/keyboard.c
55252+++ b/drivers/tty/vt/keyboard.c
55253@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
55254 kbd->kbdmode == VC_OFF) &&
55255 value != KVAL(K_SAK))
55256 return; /* SAK is allowed even in raw mode */
55257+
55258+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55259+ {
55260+ void *func = fn_handler[value];
55261+ if (func == fn_show_state || func == fn_show_ptregs ||
55262+ func == fn_show_mem)
55263+ return;
55264+ }
55265+#endif
55266+
55267 fn_handler[value](vc);
55268 }
55269
55270@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
55271 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
55272 return -EFAULT;
55273
55274- if (!capable(CAP_SYS_TTY_CONFIG))
55275- perm = 0;
55276-
55277 switch (cmd) {
55278 case KDGKBENT:
55279 /* Ensure another thread doesn't free it under us */
55280@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
55281 spin_unlock_irqrestore(&kbd_event_lock, flags);
55282 return put_user(val, &user_kbe->kb_value);
55283 case KDSKBENT:
55284+ if (!capable(CAP_SYS_TTY_CONFIG))
55285+ perm = 0;
55286+
55287 if (!perm)
55288 return -EPERM;
55289 if (!i && v == K_NOSUCHMAP) {
55290@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
55291 int i, j, k;
55292 int ret;
55293
55294- if (!capable(CAP_SYS_TTY_CONFIG))
55295- perm = 0;
55296-
55297 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
55298 if (!kbs) {
55299 ret = -ENOMEM;
55300@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
55301 kfree(kbs);
55302 return ((p && *p) ? -EOVERFLOW : 0);
55303 case KDSKBSENT:
55304+ if (!capable(CAP_SYS_TTY_CONFIG))
55305+ perm = 0;
55306+
55307 if (!perm) {
55308 ret = -EPERM;
55309 goto reterr;
55310diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
55311index 6276f13..84f2449 100644
55312--- a/drivers/uio/uio.c
55313+++ b/drivers/uio/uio.c
55314@@ -25,6 +25,7 @@
55315 #include <linux/kobject.h>
55316 #include <linux/cdev.h>
55317 #include <linux/uio_driver.h>
55318+#include <asm/local.h>
55319
55320 #define UIO_MAX_DEVICES (1U << MINORBITS)
55321
55322@@ -231,7 +232,7 @@ static ssize_t event_show(struct device *dev,
55323 struct device_attribute *attr, char *buf)
55324 {
55325 struct uio_device *idev = dev_get_drvdata(dev);
55326- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
55327+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
55328 }
55329 static DEVICE_ATTR_RO(event);
55330
55331@@ -393,7 +394,7 @@ void uio_event_notify(struct uio_info *info)
55332 {
55333 struct uio_device *idev = info->uio_dev;
55334
55335- atomic_inc(&idev->event);
55336+ atomic_inc_unchecked(&idev->event);
55337 wake_up_interruptible(&idev->wait);
55338 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
55339 }
55340@@ -446,7 +447,7 @@ static int uio_open(struct inode *inode, struct file *filep)
55341 }
55342
55343 listener->dev = idev;
55344- listener->event_count = atomic_read(&idev->event);
55345+ listener->event_count = atomic_read_unchecked(&idev->event);
55346 filep->private_data = listener;
55347
55348 if (idev->info->open) {
55349@@ -497,7 +498,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
55350 return -EIO;
55351
55352 poll_wait(filep, &idev->wait, wait);
55353- if (listener->event_count != atomic_read(&idev->event))
55354+ if (listener->event_count != atomic_read_unchecked(&idev->event))
55355 return POLLIN | POLLRDNORM;
55356 return 0;
55357 }
55358@@ -522,7 +523,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
55359 do {
55360 set_current_state(TASK_INTERRUPTIBLE);
55361
55362- event_count = atomic_read(&idev->event);
55363+ event_count = atomic_read_unchecked(&idev->event);
55364 if (event_count != listener->event_count) {
55365 if (copy_to_user(buf, &event_count, count))
55366 retval = -EFAULT;
55367@@ -579,9 +580,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
55368 static int uio_find_mem_index(struct vm_area_struct *vma)
55369 {
55370 struct uio_device *idev = vma->vm_private_data;
55371+ unsigned long size;
55372
55373 if (vma->vm_pgoff < MAX_UIO_MAPS) {
55374- if (idev->info->mem[vma->vm_pgoff].size == 0)
55375+ size = idev->info->mem[vma->vm_pgoff].size;
55376+ if (size == 0)
55377+ return -1;
55378+ if (vma->vm_end - vma->vm_start > size)
55379 return -1;
55380 return (int)vma->vm_pgoff;
55381 }
55382@@ -813,7 +818,7 @@ int __uio_register_device(struct module *owner,
55383 idev->owner = owner;
55384 idev->info = info;
55385 init_waitqueue_head(&idev->wait);
55386- atomic_set(&idev->event, 0);
55387+ atomic_set_unchecked(&idev->event, 0);
55388
55389 ret = uio_get_minor(idev);
55390 if (ret)
55391diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
55392index 813d4d3..a71934f 100644
55393--- a/drivers/usb/atm/cxacru.c
55394+++ b/drivers/usb/atm/cxacru.c
55395@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
55396 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
55397 if (ret < 2)
55398 return -EINVAL;
55399- if (index < 0 || index > 0x7f)
55400+ if (index > 0x7f)
55401 return -EINVAL;
55402 pos += tmp;
55403
55404diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
55405index dada014..1d0d517 100644
55406--- a/drivers/usb/atm/usbatm.c
55407+++ b/drivers/usb/atm/usbatm.c
55408@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55409 if (printk_ratelimit())
55410 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
55411 __func__, vpi, vci);
55412- atomic_inc(&vcc->stats->rx_err);
55413+ atomic_inc_unchecked(&vcc->stats->rx_err);
55414 return;
55415 }
55416
55417@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55418 if (length > ATM_MAX_AAL5_PDU) {
55419 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
55420 __func__, length, vcc);
55421- atomic_inc(&vcc->stats->rx_err);
55422+ atomic_inc_unchecked(&vcc->stats->rx_err);
55423 goto out;
55424 }
55425
55426@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55427 if (sarb->len < pdu_length) {
55428 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
55429 __func__, pdu_length, sarb->len, vcc);
55430- atomic_inc(&vcc->stats->rx_err);
55431+ atomic_inc_unchecked(&vcc->stats->rx_err);
55432 goto out;
55433 }
55434
55435 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
55436 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
55437 __func__, vcc);
55438- atomic_inc(&vcc->stats->rx_err);
55439+ atomic_inc_unchecked(&vcc->stats->rx_err);
55440 goto out;
55441 }
55442
55443@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55444 if (printk_ratelimit())
55445 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
55446 __func__, length);
55447- atomic_inc(&vcc->stats->rx_drop);
55448+ atomic_inc_unchecked(&vcc->stats->rx_drop);
55449 goto out;
55450 }
55451
55452@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
55453
55454 vcc->push(vcc, skb);
55455
55456- atomic_inc(&vcc->stats->rx);
55457+ atomic_inc_unchecked(&vcc->stats->rx);
55458 out:
55459 skb_trim(sarb, 0);
55460 }
55461@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
55462 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
55463
55464 usbatm_pop(vcc, skb);
55465- atomic_inc(&vcc->stats->tx);
55466+ atomic_inc_unchecked(&vcc->stats->tx);
55467
55468 skb = skb_dequeue(&instance->sndqueue);
55469 }
55470@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
55471 if (!left--)
55472 return sprintf(page,
55473 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
55474- atomic_read(&atm_dev->stats.aal5.tx),
55475- atomic_read(&atm_dev->stats.aal5.tx_err),
55476- atomic_read(&atm_dev->stats.aal5.rx),
55477- atomic_read(&atm_dev->stats.aal5.rx_err),
55478- atomic_read(&atm_dev->stats.aal5.rx_drop));
55479+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
55480+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
55481+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
55482+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
55483+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
55484
55485 if (!left--) {
55486 if (instance->disconnected)
55487diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
55488index 2a3bbdf..91d72cf 100644
55489--- a/drivers/usb/core/devices.c
55490+++ b/drivers/usb/core/devices.c
55491@@ -126,7 +126,7 @@ static const char format_endpt[] =
55492 * time it gets called.
55493 */
55494 static struct device_connect_event {
55495- atomic_t count;
55496+ atomic_unchecked_t count;
55497 wait_queue_head_t wait;
55498 } device_event = {
55499 .count = ATOMIC_INIT(1),
55500@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
55501
55502 void usbfs_conn_disc_event(void)
55503 {
55504- atomic_add(2, &device_event.count);
55505+ atomic_add_unchecked(2, &device_event.count);
55506 wake_up(&device_event.wait);
55507 }
55508
55509@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
55510
55511 poll_wait(file, &device_event.wait, wait);
55512
55513- event_count = atomic_read(&device_event.count);
55514+ event_count = atomic_read_unchecked(&device_event.count);
55515 if (file->f_version != event_count) {
55516 file->f_version = event_count;
55517 return POLLIN | POLLRDNORM;
55518diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
55519index 1163553..f292679 100644
55520--- a/drivers/usb/core/devio.c
55521+++ b/drivers/usb/core/devio.c
55522@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55523 struct usb_dev_state *ps = file->private_data;
55524 struct usb_device *dev = ps->dev;
55525 ssize_t ret = 0;
55526- unsigned len;
55527+ size_t len;
55528 loff_t pos;
55529 int i;
55530
55531@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
55532 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
55533 struct usb_config_descriptor *config =
55534 (struct usb_config_descriptor *)dev->rawdescriptors[i];
55535- unsigned int length = le16_to_cpu(config->wTotalLength);
55536+ size_t length = le16_to_cpu(config->wTotalLength);
55537
55538 if (*ppos < pos + length) {
55539
55540 /* The descriptor may claim to be longer than it
55541 * really is. Here is the actual allocated length. */
55542- unsigned alloclen =
55543+ size_t alloclen =
55544 le16_to_cpu(dev->config[i].desc.wTotalLength);
55545
55546- len = length - (*ppos - pos);
55547+ len = length + pos - *ppos;
55548 if (len > nbytes)
55549 len = nbytes;
55550
55551 /* Simply don't write (skip over) unallocated parts */
55552 if (alloclen > (*ppos - pos)) {
55553- alloclen -= (*ppos - pos);
55554+ alloclen = alloclen + pos - *ppos;
55555 if (copy_to_user(buf,
55556 dev->rawdescriptors[i] + (*ppos - pos),
55557 min(len, alloclen))) {
55558diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
55559index 45a915c..09f9735 100644
55560--- a/drivers/usb/core/hcd.c
55561+++ b/drivers/usb/core/hcd.c
55562@@ -1551,7 +1551,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55563 */
55564 usb_get_urb(urb);
55565 atomic_inc(&urb->use_count);
55566- atomic_inc(&urb->dev->urbnum);
55567+ atomic_inc_unchecked(&urb->dev->urbnum);
55568 usbmon_urb_submit(&hcd->self, urb);
55569
55570 /* NOTE requirements on root-hub callers (usbfs and the hub
55571@@ -1578,7 +1578,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
55572 urb->hcpriv = NULL;
55573 INIT_LIST_HEAD(&urb->urb_list);
55574 atomic_dec(&urb->use_count);
55575- atomic_dec(&urb->dev->urbnum);
55576+ atomic_dec_unchecked(&urb->dev->urbnum);
55577 if (atomic_read(&urb->reject))
55578 wake_up(&usb_kill_urb_queue);
55579 usb_put_urb(urb);
55580diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
55581index 3b71516..1f26579 100644
55582--- a/drivers/usb/core/hub.c
55583+++ b/drivers/usb/core/hub.c
55584@@ -26,6 +26,7 @@
55585 #include <linux/mutex.h>
55586 #include <linux/random.h>
55587 #include <linux/pm_qos.h>
55588+#include <linux/grsecurity.h>
55589
55590 #include <asm/uaccess.h>
55591 #include <asm/byteorder.h>
55592@@ -4665,6 +4666,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
55593 goto done;
55594 return;
55595 }
55596+
55597+ if (gr_handle_new_usb())
55598+ goto done;
55599+
55600 if (hub_is_superspeed(hub->hdev))
55601 unit_load = 150;
55602 else
55603diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
55604index f368d20..0c30ac5 100644
55605--- a/drivers/usb/core/message.c
55606+++ b/drivers/usb/core/message.c
55607@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
55608 * Return: If successful, the number of bytes transferred. Otherwise, a negative
55609 * error number.
55610 */
55611-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55612+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
55613 __u8 requesttype, __u16 value, __u16 index, void *data,
55614 __u16 size, int timeout)
55615 {
55616@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
55617 * If successful, 0. Otherwise a negative error number. The number of actual
55618 * bytes transferred will be stored in the @actual_length parameter.
55619 */
55620-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55621+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
55622 void *data, int len, int *actual_length, int timeout)
55623 {
55624 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
55625@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
55626 * bytes transferred will be stored in the @actual_length parameter.
55627 *
55628 */
55629-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55630+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
55631 void *data, int len, int *actual_length, int timeout)
55632 {
55633 struct urb *urb;
55634diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
55635index d269738..7340cd7 100644
55636--- a/drivers/usb/core/sysfs.c
55637+++ b/drivers/usb/core/sysfs.c
55638@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
55639 struct usb_device *udev;
55640
55641 udev = to_usb_device(dev);
55642- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
55643+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
55644 }
55645 static DEVICE_ATTR_RO(urbnum);
55646
55647diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
55648index b1fb9ae..4224885 100644
55649--- a/drivers/usb/core/usb.c
55650+++ b/drivers/usb/core/usb.c
55651@@ -431,7 +431,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
55652 set_dev_node(&dev->dev, dev_to_node(bus->controller));
55653 dev->state = USB_STATE_ATTACHED;
55654 dev->lpm_disable_count = 1;
55655- atomic_set(&dev->urbnum, 0);
55656+ atomic_set_unchecked(&dev->urbnum, 0);
55657
55658 INIT_LIST_HEAD(&dev->ep0.urb_list);
55659 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
55660diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
55661index 8cfc319..4868255 100644
55662--- a/drivers/usb/early/ehci-dbgp.c
55663+++ b/drivers/usb/early/ehci-dbgp.c
55664@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
55665
55666 #ifdef CONFIG_KGDB
55667 static struct kgdb_io kgdbdbgp_io_ops;
55668-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
55669+static struct kgdb_io kgdbdbgp_io_ops_console;
55670+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
55671 #else
55672 #define dbgp_kgdb_mode (0)
55673 #endif
55674@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
55675 .write_char = kgdbdbgp_write_char,
55676 };
55677
55678+static struct kgdb_io kgdbdbgp_io_ops_console = {
55679+ .name = "kgdbdbgp",
55680+ .read_char = kgdbdbgp_read_char,
55681+ .write_char = kgdbdbgp_write_char,
55682+ .is_console = 1
55683+};
55684+
55685 static int kgdbdbgp_wait_time;
55686
55687 static int __init kgdbdbgp_parse_config(char *str)
55688@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
55689 ptr++;
55690 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
55691 }
55692- kgdb_register_io_module(&kgdbdbgp_io_ops);
55693- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
55694+ if (early_dbgp_console.index != -1)
55695+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
55696+ else
55697+ kgdb_register_io_module(&kgdbdbgp_io_ops);
55698
55699 return 0;
55700 }
55701diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
55702index 0495c94..289e201 100644
55703--- a/drivers/usb/gadget/configfs.c
55704+++ b/drivers/usb/gadget/configfs.c
55705@@ -571,7 +571,7 @@ static struct config_group *function_make(
55706 if (IS_ERR(fi))
55707 return ERR_CAST(fi);
55708
55709- ret = config_item_set_name(&fi->group.cg_item, name);
55710+ ret = config_item_set_name(&fi->group.cg_item, "%s", name);
55711 if (ret) {
55712 usb_put_function_instance(fi);
55713 return ERR_PTR(ret);
55714diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
55715index 9719abf..789d5d9 100644
55716--- a/drivers/usb/gadget/function/f_uac1.c
55717+++ b/drivers/usb/gadget/function/f_uac1.c
55718@@ -14,6 +14,7 @@
55719 #include <linux/module.h>
55720 #include <linux/device.h>
55721 #include <linux/atomic.h>
55722+#include <linux/module.h>
55723
55724 #include "u_uac1.h"
55725
55726diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
55727index 491082a..dfd7d17 100644
55728--- a/drivers/usb/gadget/function/u_serial.c
55729+++ b/drivers/usb/gadget/function/u_serial.c
55730@@ -729,9 +729,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55731 spin_lock_irq(&port->port_lock);
55732
55733 /* already open? Great. */
55734- if (port->port.count) {
55735+ if (atomic_read(&port->port.count)) {
55736 status = 0;
55737- port->port.count++;
55738+ atomic_inc(&port->port.count);
55739
55740 /* currently opening/closing? wait ... */
55741 } else if (port->openclose) {
55742@@ -790,7 +790,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
55743 tty->driver_data = port;
55744 port->port.tty = tty;
55745
55746- port->port.count = 1;
55747+ atomic_set(&port->port.count, 1);
55748 port->openclose = false;
55749
55750 /* if connected, start the I/O stream */
55751@@ -832,11 +832,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55752
55753 spin_lock_irq(&port->port_lock);
55754
55755- if (port->port.count != 1) {
55756- if (port->port.count == 0)
55757+ if (atomic_read(&port->port.count) != 1) {
55758+ if (atomic_read(&port->port.count) == 0)
55759 WARN_ON(1);
55760 else
55761- --port->port.count;
55762+ atomic_dec(&port->port.count);
55763 goto exit;
55764 }
55765
55766@@ -846,7 +846,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
55767 * and sleep if necessary
55768 */
55769 port->openclose = true;
55770- port->port.count = 0;
55771+ atomic_set(&port->port.count, 0);
55772
55773 gser = port->port_usb;
55774 if (gser && gser->disconnect)
55775@@ -1062,7 +1062,7 @@ static int gs_closed(struct gs_port *port)
55776 int cond;
55777
55778 spin_lock_irq(&port->port_lock);
55779- cond = (port->port.count == 0) && !port->openclose;
55780+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
55781 spin_unlock_irq(&port->port_lock);
55782 return cond;
55783 }
55784@@ -1205,7 +1205,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
55785 /* if it's already open, start I/O ... and notify the serial
55786 * protocol about open/close status (connect/disconnect).
55787 */
55788- if (port->port.count) {
55789+ if (atomic_read(&port->port.count)) {
55790 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
55791 gs_start_io(port);
55792 if (gser->connect)
55793@@ -1252,7 +1252,7 @@ void gserial_disconnect(struct gserial *gser)
55794
55795 port->port_usb = NULL;
55796 gser->ioport = NULL;
55797- if (port->port.count > 0 || port->openclose) {
55798+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
55799 wake_up_interruptible(&port->drain_wait);
55800 if (port->port.tty)
55801 tty_hangup(port->port.tty);
55802@@ -1268,7 +1268,7 @@ void gserial_disconnect(struct gserial *gser)
55803
55804 /* finally, free any unused/unusable I/O buffers */
55805 spin_lock_irqsave(&port->port_lock, flags);
55806- if (port->port.count == 0 && !port->openclose)
55807+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
55808 gs_buf_free(&port->port_write_buf);
55809 gs_free_requests(gser->out, &port->read_pool, NULL);
55810 gs_free_requests(gser->out, &port->read_queue, NULL);
55811diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
55812index c78c841..48fd281 100644
55813--- a/drivers/usb/gadget/function/u_uac1.c
55814+++ b/drivers/usb/gadget/function/u_uac1.c
55815@@ -17,6 +17,7 @@
55816 #include <linux/ctype.h>
55817 #include <linux/random.h>
55818 #include <linux/syscalls.h>
55819+#include <linux/module.h>
55820
55821 #include "u_uac1.h"
55822
55823diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
55824index 7354d01..299478e 100644
55825--- a/drivers/usb/host/ehci-hub.c
55826+++ b/drivers/usb/host/ehci-hub.c
55827@@ -772,7 +772,7 @@ static struct urb *request_single_step_set_feature_urb(
55828 urb->transfer_flags = URB_DIR_IN;
55829 usb_get_urb(urb);
55830 atomic_inc(&urb->use_count);
55831- atomic_inc(&urb->dev->urbnum);
55832+ atomic_inc_unchecked(&urb->dev->urbnum);
55833 urb->setup_dma = dma_map_single(
55834 hcd->self.controller,
55835 urb->setup_packet,
55836@@ -839,7 +839,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
55837 urb->status = -EINPROGRESS;
55838 usb_get_urb(urb);
55839 atomic_inc(&urb->use_count);
55840- atomic_inc(&urb->dev->urbnum);
55841+ atomic_inc_unchecked(&urb->dev->urbnum);
55842 retval = submit_single_step_set_feature(hcd, urb, 0);
55843 if (!retval && !wait_for_completion_timeout(&done,
55844 msecs_to_jiffies(2000))) {
55845diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
55846index 1db0626..4948782 100644
55847--- a/drivers/usb/host/hwa-hc.c
55848+++ b/drivers/usb/host/hwa-hc.c
55849@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
55850 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
55851 struct wahc *wa = &hwahc->wa;
55852 struct device *dev = &wa->usb_iface->dev;
55853- u8 mas_le[UWB_NUM_MAS/8];
55854+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
55855+
55856+ if (mas_le == NULL)
55857+ return -ENOMEM;
55858
55859 /* Set the stream index */
55860 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
55861@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
55862 WUSB_REQ_SET_WUSB_MAS,
55863 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
55864 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
55865- mas_le, 32, USB_CTRL_SET_TIMEOUT);
55866+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
55867 if (result < 0)
55868 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
55869 out:
55870+ kfree(mas_le);
55871+
55872 return result;
55873 }
55874
55875diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
55876index b3d245e..99549ed 100644
55877--- a/drivers/usb/misc/appledisplay.c
55878+++ b/drivers/usb/misc/appledisplay.c
55879@@ -84,7 +84,7 @@ struct appledisplay {
55880 struct mutex sysfslock; /* concurrent read and write */
55881 };
55882
55883-static atomic_t count_displays = ATOMIC_INIT(0);
55884+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
55885 static struct workqueue_struct *wq;
55886
55887 static void appledisplay_complete(struct urb *urb)
55888@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
55889
55890 /* Register backlight device */
55891 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
55892- atomic_inc_return(&count_displays) - 1);
55893+ atomic_inc_return_unchecked(&count_displays) - 1);
55894 memset(&props, 0, sizeof(struct backlight_properties));
55895 props.type = BACKLIGHT_RAW;
55896 props.max_brightness = 0xff;
55897diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
55898index 3806e70..55c508b 100644
55899--- a/drivers/usb/serial/console.c
55900+++ b/drivers/usb/serial/console.c
55901@@ -126,7 +126,7 @@ static int usb_console_setup(struct console *co, char *options)
55902
55903 info->port = port;
55904
55905- ++port->port.count;
55906+ atomic_inc(&port->port.count);
55907 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
55908 if (serial->type->set_termios) {
55909 /*
55910@@ -175,7 +175,7 @@ static int usb_console_setup(struct console *co, char *options)
55911 }
55912 /* Now that any required fake tty operations are completed restore
55913 * the tty port count */
55914- --port->port.count;
55915+ atomic_dec(&port->port.count);
55916 /* The console is special in terms of closing the device so
55917 * indicate this port is now acting as a system console. */
55918 port->port.console = 1;
55919@@ -188,7 +188,7 @@ static int usb_console_setup(struct console *co, char *options)
55920 put_tty:
55921 tty_kref_put(tty);
55922 reset_open_count:
55923- port->port.count = 0;
55924+ atomic_set(&port->port.count, 0);
55925 usb_autopm_put_interface(serial->interface);
55926 error_get_interface:
55927 usb_serial_put(serial);
55928@@ -199,7 +199,7 @@ static int usb_console_setup(struct console *co, char *options)
55929 static void usb_console_write(struct console *co,
55930 const char *buf, unsigned count)
55931 {
55932- static struct usbcons_info *info = &usbcons_info;
55933+ struct usbcons_info *info = &usbcons_info;
55934 struct usb_serial_port *port = info->port;
55935 struct usb_serial *serial;
55936 int retval = -ENODEV;
55937diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
55938index 307e339..6aa97cb 100644
55939--- a/drivers/usb/storage/usb.h
55940+++ b/drivers/usb/storage/usb.h
55941@@ -63,7 +63,7 @@ struct us_unusual_dev {
55942 __u8 useProtocol;
55943 __u8 useTransport;
55944 int (*initFunction)(struct us_data *);
55945-};
55946+} __do_const;
55947
55948
55949 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
55950diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
55951index a863a98..d272795 100644
55952--- a/drivers/usb/usbip/vhci.h
55953+++ b/drivers/usb/usbip/vhci.h
55954@@ -83,7 +83,7 @@ struct vhci_hcd {
55955 unsigned resuming:1;
55956 unsigned long re_timeout;
55957
55958- atomic_t seqnum;
55959+ atomic_unchecked_t seqnum;
55960
55961 /*
55962 * NOTE:
55963diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
55964index 11f6f61..1087910 100644
55965--- a/drivers/usb/usbip/vhci_hcd.c
55966+++ b/drivers/usb/usbip/vhci_hcd.c
55967@@ -440,7 +440,7 @@ static void vhci_tx_urb(struct urb *urb)
55968
55969 spin_lock(&vdev->priv_lock);
55970
55971- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
55972+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
55973 if (priv->seqnum == 0xffff)
55974 dev_info(&urb->dev->dev, "seqnum max\n");
55975
55976@@ -685,7 +685,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
55977 return -ENOMEM;
55978 }
55979
55980- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
55981+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
55982 if (unlink->seqnum == 0xffff)
55983 pr_info("seqnum max\n");
55984
55985@@ -889,7 +889,7 @@ static int vhci_start(struct usb_hcd *hcd)
55986 vdev->rhport = rhport;
55987 }
55988
55989- atomic_set(&vhci->seqnum, 0);
55990+ atomic_set_unchecked(&vhci->seqnum, 0);
55991 spin_lock_init(&vhci->lock);
55992
55993 hcd->power_budget = 0; /* no limit */
55994diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
55995index 00e4a54..d676f85 100644
55996--- a/drivers/usb/usbip/vhci_rx.c
55997+++ b/drivers/usb/usbip/vhci_rx.c
55998@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
55999 if (!urb) {
56000 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
56001 pr_info("max seqnum %d\n",
56002- atomic_read(&the_controller->seqnum));
56003+ atomic_read_unchecked(&the_controller->seqnum));
56004 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
56005 return;
56006 }
56007diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
56008index edc7267..9f65ce2 100644
56009--- a/drivers/usb/wusbcore/wa-hc.h
56010+++ b/drivers/usb/wusbcore/wa-hc.h
56011@@ -240,7 +240,7 @@ struct wahc {
56012 spinlock_t xfer_list_lock;
56013 struct work_struct xfer_enqueue_work;
56014 struct work_struct xfer_error_work;
56015- atomic_t xfer_id_count;
56016+ atomic_unchecked_t xfer_id_count;
56017
56018 kernel_ulong_t quirks;
56019 };
56020@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
56021 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
56022 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
56023 wa->dto_in_use = 0;
56024- atomic_set(&wa->xfer_id_count, 1);
56025+ atomic_set_unchecked(&wa->xfer_id_count, 1);
56026 /* init the buf in URBs */
56027 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
56028 usb_init_urb(&(wa->buf_in_urbs[index]));
56029diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
56030index 69af4fd..da390d7 100644
56031--- a/drivers/usb/wusbcore/wa-xfer.c
56032+++ b/drivers/usb/wusbcore/wa-xfer.c
56033@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
56034 */
56035 static void wa_xfer_id_init(struct wa_xfer *xfer)
56036 {
56037- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
56038+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
56039 }
56040
56041 /* Return the xfer's ID. */
56042diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
56043index 837d177..170724af 100644
56044--- a/drivers/vfio/vfio.c
56045+++ b/drivers/vfio/vfio.c
56046@@ -518,7 +518,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
56047 return 0;
56048
56049 /* TODO Prevent device auto probing */
56050- WARN("Device %s added to live group %d!\n", dev_name(dev),
56051+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
56052 iommu_group_id(group->iommu_group));
56053
56054 return 0;
56055diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
56056index 3bb02c6..a01ff38 100644
56057--- a/drivers/vhost/vringh.c
56058+++ b/drivers/vhost/vringh.c
56059@@ -551,7 +551,7 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
56060 static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
56061 {
56062 __virtio16 v = 0;
56063- int rc = get_user(v, (__force __virtio16 __user *)p);
56064+ int rc = get_user(v, (__force_user __virtio16 *)p);
56065 *val = vringh16_to_cpu(vrh, v);
56066 return rc;
56067 }
56068@@ -559,12 +559,12 @@ static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio
56069 static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
56070 {
56071 __virtio16 v = cpu_to_vringh16(vrh, val);
56072- return put_user(v, (__force __virtio16 __user *)p);
56073+ return put_user(v, (__force_user __virtio16 *)p);
56074 }
56075
56076 static inline int copydesc_user(void *dst, const void *src, size_t len)
56077 {
56078- return copy_from_user(dst, (__force void __user *)src, len) ?
56079+ return copy_from_user(dst, (void __force_user *)src, len) ?
56080 -EFAULT : 0;
56081 }
56082
56083@@ -572,19 +572,19 @@ static inline int putused_user(struct vring_used_elem *dst,
56084 const struct vring_used_elem *src,
56085 unsigned int num)
56086 {
56087- return copy_to_user((__force void __user *)dst, src,
56088+ return copy_to_user((void __force_user *)dst, src,
56089 sizeof(*dst) * num) ? -EFAULT : 0;
56090 }
56091
56092 static inline int xfer_from_user(void *src, void *dst, size_t len)
56093 {
56094- return copy_from_user(dst, (__force void __user *)src, len) ?
56095+ return copy_from_user(dst, (void __force_user *)src, len) ?
56096 -EFAULT : 0;
56097 }
56098
56099 static inline int xfer_to_user(void *dst, void *src, size_t len)
56100 {
56101- return copy_to_user((__force void __user *)dst, src, len) ?
56102+ return copy_to_user((void __force_user *)dst, src, len) ?
56103 -EFAULT : 0;
56104 }
56105
56106@@ -621,9 +621,9 @@ int vringh_init_user(struct vringh *vrh, u64 features,
56107 vrh->last_used_idx = 0;
56108 vrh->vring.num = num;
56109 /* vring expects kernel addresses, but only used via accessors. */
56110- vrh->vring.desc = (__force struct vring_desc *)desc;
56111- vrh->vring.avail = (__force struct vring_avail *)avail;
56112- vrh->vring.used = (__force struct vring_used *)used;
56113+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
56114+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
56115+ vrh->vring.used = (__force_kernel struct vring_used *)used;
56116 return 0;
56117 }
56118 EXPORT_SYMBOL(vringh_init_user);
56119@@ -826,7 +826,7 @@ static inline int getu16_kern(const struct vringh *vrh,
56120
56121 static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
56122 {
56123- ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
56124+ ACCESS_ONCE_RW(*p) = cpu_to_vringh16(vrh, val);
56125 return 0;
56126 }
56127
56128diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
56129index 84a110a..96312c3 100644
56130--- a/drivers/video/backlight/kb3886_bl.c
56131+++ b/drivers/video/backlight/kb3886_bl.c
56132@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
56133 static unsigned long kb3886bl_flags;
56134 #define KB3886BL_SUSPENDED 0x01
56135
56136-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
56137+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
56138 {
56139 .ident = "Sahara Touch-iT",
56140 .matches = {
56141diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
56142index 1b0b233..6f34c2c 100644
56143--- a/drivers/video/fbdev/arcfb.c
56144+++ b/drivers/video/fbdev/arcfb.c
56145@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
56146 return -ENOSPC;
56147
56148 err = 0;
56149- if ((count + p) > fbmemlength) {
56150+ if (count > (fbmemlength - p)) {
56151 count = fbmemlength - p;
56152 err = -ENOSPC;
56153 }
56154diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
56155index aedf2fb..47c9aca 100644
56156--- a/drivers/video/fbdev/aty/aty128fb.c
56157+++ b/drivers/video/fbdev/aty/aty128fb.c
56158@@ -149,7 +149,7 @@ enum {
56159 };
56160
56161 /* Must match above enum */
56162-static char * const r128_family[] = {
56163+static const char * const r128_family[] = {
56164 "AGP",
56165 "PCI",
56166 "PRO AGP",
56167diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
56168index 8789e48..698fe4c 100644
56169--- a/drivers/video/fbdev/aty/atyfb_base.c
56170+++ b/drivers/video/fbdev/aty/atyfb_base.c
56171@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
56172 par->accel_flags = var->accel_flags; /* hack */
56173
56174 if (var->accel_flags) {
56175- info->fbops->fb_sync = atyfb_sync;
56176+ pax_open_kernel();
56177+ *(void **)&info->fbops->fb_sync = atyfb_sync;
56178+ pax_close_kernel();
56179 info->flags &= ~FBINFO_HWACCEL_DISABLED;
56180 } else {
56181- info->fbops->fb_sync = NULL;
56182+ pax_open_kernel();
56183+ *(void **)&info->fbops->fb_sync = NULL;
56184+ pax_close_kernel();
56185 info->flags |= FBINFO_HWACCEL_DISABLED;
56186 }
56187
56188diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
56189index 2fa0317..4983f2a 100644
56190--- a/drivers/video/fbdev/aty/mach64_cursor.c
56191+++ b/drivers/video/fbdev/aty/mach64_cursor.c
56192@@ -8,6 +8,7 @@
56193 #include "../core/fb_draw.h"
56194
56195 #include <asm/io.h>
56196+#include <asm/pgtable.h>
56197
56198 #ifdef __sparc__
56199 #include <asm/fbio.h>
56200@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
56201 info->sprite.buf_align = 16; /* and 64 lines tall. */
56202 info->sprite.flags = FB_PIXMAP_IO;
56203
56204- info->fbops->fb_cursor = atyfb_cursor;
56205+ pax_open_kernel();
56206+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
56207+ pax_close_kernel();
56208
56209 return 0;
56210 }
56211diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
56212index d6cab1f..112f680 100644
56213--- a/drivers/video/fbdev/core/fb_defio.c
56214+++ b/drivers/video/fbdev/core/fb_defio.c
56215@@ -207,7 +207,9 @@ void fb_deferred_io_init(struct fb_info *info)
56216
56217 BUG_ON(!fbdefio);
56218 mutex_init(&fbdefio->lock);
56219- info->fbops->fb_mmap = fb_deferred_io_mmap;
56220+ pax_open_kernel();
56221+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
56222+ pax_close_kernel();
56223 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
56224 INIT_LIST_HEAD(&fbdefio->pagelist);
56225 if (fbdefio->delay == 0) /* set a default of 1 s */
56226@@ -238,7 +240,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
56227 page->mapping = NULL;
56228 }
56229
56230- info->fbops->fb_mmap = NULL;
56231+ *(void **)&info->fbops->fb_mmap = NULL;
56232 mutex_destroy(&fbdefio->lock);
56233 }
56234 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
56235diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
56236index 0705d88..d9429bf 100644
56237--- a/drivers/video/fbdev/core/fbmem.c
56238+++ b/drivers/video/fbdev/core/fbmem.c
56239@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
56240 __u32 data;
56241 int err;
56242
56243- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
56244+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
56245
56246 data = (__u32) (unsigned long) fix->smem_start;
56247 err |= put_user(data, &fix32->smem_start);
56248diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
56249index 4254336..282567e 100644
56250--- a/drivers/video/fbdev/hyperv_fb.c
56251+++ b/drivers/video/fbdev/hyperv_fb.c
56252@@ -240,7 +240,7 @@ static uint screen_fb_size;
56253 static inline int synthvid_send(struct hv_device *hdev,
56254 struct synthvid_msg *msg)
56255 {
56256- static atomic64_t request_id = ATOMIC64_INIT(0);
56257+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
56258 int ret;
56259
56260 msg->pipe_hdr.type = PIPE_MSG_DATA;
56261@@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev,
56262
56263 ret = vmbus_sendpacket(hdev->channel, msg,
56264 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
56265- atomic64_inc_return(&request_id),
56266+ atomic64_inc_return_unchecked(&request_id),
56267 VM_PKT_DATA_INBAND, 0);
56268
56269 if (ret)
56270diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
56271index 7672d2e..b56437f 100644
56272--- a/drivers/video/fbdev/i810/i810_accel.c
56273+++ b/drivers/video/fbdev/i810/i810_accel.c
56274@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
56275 }
56276 }
56277 printk("ringbuffer lockup!!!\n");
56278+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
56279 i810_report_error(mmio);
56280 par->dev_flags |= LOCKUP;
56281 info->pixmap.scan_align = 1;
56282diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
56283index a01147f..5d896f8 100644
56284--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
56285+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
56286@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
56287
56288 #ifdef CONFIG_FB_MATROX_MYSTIQUE
56289 struct matrox_switch matrox_mystique = {
56290- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
56291+ .preinit = MGA1064_preinit,
56292+ .reset = MGA1064_reset,
56293+ .init = MGA1064_init,
56294+ .restore = MGA1064_restore,
56295 };
56296 EXPORT_SYMBOL(matrox_mystique);
56297 #endif
56298
56299 #ifdef CONFIG_FB_MATROX_G
56300 struct matrox_switch matrox_G100 = {
56301- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
56302+ .preinit = MGAG100_preinit,
56303+ .reset = MGAG100_reset,
56304+ .init = MGAG100_init,
56305+ .restore = MGAG100_restore,
56306 };
56307 EXPORT_SYMBOL(matrox_G100);
56308 #endif
56309diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
56310index 195ad7c..09743fc 100644
56311--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
56312+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
56313@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
56314 }
56315
56316 struct matrox_switch matrox_millennium = {
56317- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
56318+ .preinit = Ti3026_preinit,
56319+ .reset = Ti3026_reset,
56320+ .init = Ti3026_init,
56321+ .restore = Ti3026_restore
56322 };
56323 EXPORT_SYMBOL(matrox_millennium);
56324 #endif
56325diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
56326index fe92eed..106e085 100644
56327--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
56328+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
56329@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
56330 struct mb862xxfb_par *par = info->par;
56331
56332 if (info->var.bits_per_pixel == 32) {
56333- info->fbops->fb_fillrect = cfb_fillrect;
56334- info->fbops->fb_copyarea = cfb_copyarea;
56335- info->fbops->fb_imageblit = cfb_imageblit;
56336+ pax_open_kernel();
56337+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
56338+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
56339+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
56340+ pax_close_kernel();
56341 } else {
56342 outreg(disp, GC_L0EM, 3);
56343- info->fbops->fb_fillrect = mb86290fb_fillrect;
56344- info->fbops->fb_copyarea = mb86290fb_copyarea;
56345- info->fbops->fb_imageblit = mb86290fb_imageblit;
56346+ pax_open_kernel();
56347+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
56348+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
56349+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
56350+ pax_close_kernel();
56351 }
56352 outreg(draw, GDC_REG_DRAW_BASE, 0);
56353 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
56354diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
56355index def0412..fed6529 100644
56356--- a/drivers/video/fbdev/nvidia/nvidia.c
56357+++ b/drivers/video/fbdev/nvidia/nvidia.c
56358@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
56359 info->fix.line_length = (info->var.xres_virtual *
56360 info->var.bits_per_pixel) >> 3;
56361 if (info->var.accel_flags) {
56362- info->fbops->fb_imageblit = nvidiafb_imageblit;
56363- info->fbops->fb_fillrect = nvidiafb_fillrect;
56364- info->fbops->fb_copyarea = nvidiafb_copyarea;
56365- info->fbops->fb_sync = nvidiafb_sync;
56366+ pax_open_kernel();
56367+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
56368+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
56369+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
56370+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
56371+ pax_close_kernel();
56372 info->pixmap.scan_align = 4;
56373 info->flags &= ~FBINFO_HWACCEL_DISABLED;
56374 info->flags |= FBINFO_READS_FAST;
56375 NVResetGraphics(info);
56376 } else {
56377- info->fbops->fb_imageblit = cfb_imageblit;
56378- info->fbops->fb_fillrect = cfb_fillrect;
56379- info->fbops->fb_copyarea = cfb_copyarea;
56380- info->fbops->fb_sync = NULL;
56381+ pax_open_kernel();
56382+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
56383+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
56384+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
56385+ *(void **)&info->fbops->fb_sync = NULL;
56386+ pax_close_kernel();
56387 info->pixmap.scan_align = 1;
56388 info->flags |= FBINFO_HWACCEL_DISABLED;
56389 info->flags &= ~FBINFO_READS_FAST;
56390@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
56391 info->pixmap.size = 8 * 1024;
56392 info->pixmap.flags = FB_PIXMAP_SYSTEM;
56393
56394- if (!hwcur)
56395- info->fbops->fb_cursor = NULL;
56396+ if (!hwcur) {
56397+ pax_open_kernel();
56398+ *(void **)&info->fbops->fb_cursor = NULL;
56399+ pax_close_kernel();
56400+ }
56401
56402 info->var.accel_flags = (!noaccel);
56403
56404diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
56405index 2412a0d..294215b 100644
56406--- a/drivers/video/fbdev/omap2/dss/display.c
56407+++ b/drivers/video/fbdev/omap2/dss/display.c
56408@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
56409 if (dssdev->name == NULL)
56410 dssdev->name = dssdev->alias;
56411
56412+ pax_open_kernel();
56413 if (drv && drv->get_resolution == NULL)
56414- drv->get_resolution = omapdss_default_get_resolution;
56415+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
56416 if (drv && drv->get_recommended_bpp == NULL)
56417- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
56418+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
56419 if (drv && drv->get_timings == NULL)
56420- drv->get_timings = omapdss_default_get_timings;
56421+ *(void **)&drv->get_timings = omapdss_default_get_timings;
56422+ pax_close_kernel();
56423
56424 mutex_lock(&panel_list_mutex);
56425 list_add_tail(&dssdev->panel_list, &panel_list);
56426diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
56427index 83433cb..71e9b98 100644
56428--- a/drivers/video/fbdev/s1d13xxxfb.c
56429+++ b/drivers/video/fbdev/s1d13xxxfb.c
56430@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
56431
56432 switch(prod_id) {
56433 case S1D13506_PROD_ID: /* activate acceleration */
56434- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
56435- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
56436+ pax_open_kernel();
56437+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
56438+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
56439+ pax_close_kernel();
56440 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
56441 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
56442 break;
56443diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
56444index d3013cd..95b8285 100644
56445--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
56446+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
56447@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
56448 }
56449
56450 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
56451- lcdc_sys_write_index,
56452- lcdc_sys_write_data,
56453- lcdc_sys_read_data,
56454+ .write_index = lcdc_sys_write_index,
56455+ .write_data = lcdc_sys_write_data,
56456+ .read_data = lcdc_sys_read_data,
56457 };
56458
56459 static int sh_mobile_lcdc_sginit(struct fb_info *info,
56460diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
56461index 9279e5f..d5f5276 100644
56462--- a/drivers/video/fbdev/smscufx.c
56463+++ b/drivers/video/fbdev/smscufx.c
56464@@ -1174,7 +1174,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
56465 fb_deferred_io_cleanup(info);
56466 kfree(info->fbdefio);
56467 info->fbdefio = NULL;
56468- info->fbops->fb_mmap = ufx_ops_mmap;
56469+ pax_open_kernel();
56470+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
56471+ pax_close_kernel();
56472 }
56473
56474 pr_debug("released /dev/fb%d user=%d count=%d",
56475diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
56476index ff2b873..626a8d5 100644
56477--- a/drivers/video/fbdev/udlfb.c
56478+++ b/drivers/video/fbdev/udlfb.c
56479@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
56480 dlfb_urb_completion(urb);
56481
56482 error:
56483- atomic_add(bytes_sent, &dev->bytes_sent);
56484- atomic_add(bytes_identical, &dev->bytes_identical);
56485- atomic_add(width*height*2, &dev->bytes_rendered);
56486+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56487+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56488+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
56489 end_cycles = get_cycles();
56490- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56491+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56492 >> 10)), /* Kcycles */
56493 &dev->cpu_kcycles_used);
56494
56495@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
56496 dlfb_urb_completion(urb);
56497
56498 error:
56499- atomic_add(bytes_sent, &dev->bytes_sent);
56500- atomic_add(bytes_identical, &dev->bytes_identical);
56501- atomic_add(bytes_rendered, &dev->bytes_rendered);
56502+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
56503+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
56504+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
56505 end_cycles = get_cycles();
56506- atomic_add(((unsigned int) ((end_cycles - start_cycles)
56507+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
56508 >> 10)), /* Kcycles */
56509 &dev->cpu_kcycles_used);
56510 }
56511@@ -991,7 +991,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
56512 fb_deferred_io_cleanup(info);
56513 kfree(info->fbdefio);
56514 info->fbdefio = NULL;
56515- info->fbops->fb_mmap = dlfb_ops_mmap;
56516+ pax_open_kernel();
56517+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
56518+ pax_close_kernel();
56519 }
56520
56521 pr_warn("released /dev/fb%d user=%d count=%d\n",
56522@@ -1373,7 +1375,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
56523 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56524 struct dlfb_data *dev = fb_info->par;
56525 return snprintf(buf, PAGE_SIZE, "%u\n",
56526- atomic_read(&dev->bytes_rendered));
56527+ atomic_read_unchecked(&dev->bytes_rendered));
56528 }
56529
56530 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56531@@ -1381,7 +1383,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
56532 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56533 struct dlfb_data *dev = fb_info->par;
56534 return snprintf(buf, PAGE_SIZE, "%u\n",
56535- atomic_read(&dev->bytes_identical));
56536+ atomic_read_unchecked(&dev->bytes_identical));
56537 }
56538
56539 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56540@@ -1389,7 +1391,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
56541 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56542 struct dlfb_data *dev = fb_info->par;
56543 return snprintf(buf, PAGE_SIZE, "%u\n",
56544- atomic_read(&dev->bytes_sent));
56545+ atomic_read_unchecked(&dev->bytes_sent));
56546 }
56547
56548 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56549@@ -1397,7 +1399,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
56550 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56551 struct dlfb_data *dev = fb_info->par;
56552 return snprintf(buf, PAGE_SIZE, "%u\n",
56553- atomic_read(&dev->cpu_kcycles_used));
56554+ atomic_read_unchecked(&dev->cpu_kcycles_used));
56555 }
56556
56557 static ssize_t edid_show(
56558@@ -1457,10 +1459,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
56559 struct fb_info *fb_info = dev_get_drvdata(fbdev);
56560 struct dlfb_data *dev = fb_info->par;
56561
56562- atomic_set(&dev->bytes_rendered, 0);
56563- atomic_set(&dev->bytes_identical, 0);
56564- atomic_set(&dev->bytes_sent, 0);
56565- atomic_set(&dev->cpu_kcycles_used, 0);
56566+ atomic_set_unchecked(&dev->bytes_rendered, 0);
56567+ atomic_set_unchecked(&dev->bytes_identical, 0);
56568+ atomic_set_unchecked(&dev->bytes_sent, 0);
56569+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
56570
56571 return count;
56572 }
56573diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
56574index d32d1c4..46722e6 100644
56575--- a/drivers/video/fbdev/uvesafb.c
56576+++ b/drivers/video/fbdev/uvesafb.c
56577@@ -19,6 +19,7 @@
56578 #include <linux/io.h>
56579 #include <linux/mutex.h>
56580 #include <linux/slab.h>
56581+#include <linux/moduleloader.h>
56582 #include <video/edid.h>
56583 #include <video/uvesafb.h>
56584 #ifdef CONFIG_X86
56585@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
56586 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
56587 par->pmi_setpal = par->ypan = 0;
56588 } else {
56589+
56590+#ifdef CONFIG_PAX_KERNEXEC
56591+#ifdef CONFIG_MODULES
56592+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
56593+#endif
56594+ if (!par->pmi_code) {
56595+ par->pmi_setpal = par->ypan = 0;
56596+ return 0;
56597+ }
56598+#endif
56599+
56600 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
56601 + task->t.regs.edi);
56602+
56603+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56604+ pax_open_kernel();
56605+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
56606+ pax_close_kernel();
56607+
56608+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
56609+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
56610+#else
56611 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
56612 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
56613+#endif
56614+
56615 printk(KERN_INFO "uvesafb: protected mode interface info at "
56616 "%04x:%04x\n",
56617 (u16)task->t.regs.es, (u16)task->t.regs.edi);
56618@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
56619 par->ypan = ypan;
56620
56621 if (par->pmi_setpal || par->ypan) {
56622+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
56623 if (__supported_pte_mask & _PAGE_NX) {
56624 par->pmi_setpal = par->ypan = 0;
56625 printk(KERN_WARNING "uvesafb: NX protection is active, "
56626 "better not use the PMI.\n");
56627- } else {
56628+ } else
56629+#endif
56630 uvesafb_vbe_getpmi(task, par);
56631- }
56632 }
56633 #else
56634 /* The protected mode interface is not available on non-x86. */
56635@@ -1452,8 +1476,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56636 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
56637
56638 /* Disable blanking if the user requested so. */
56639- if (!blank)
56640- info->fbops->fb_blank = NULL;
56641+ if (!blank) {
56642+ pax_open_kernel();
56643+ *(void **)&info->fbops->fb_blank = NULL;
56644+ pax_close_kernel();
56645+ }
56646
56647 /*
56648 * Find out how much IO memory is required for the mode with
56649@@ -1524,8 +1551,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
56650 info->flags = FBINFO_FLAG_DEFAULT |
56651 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
56652
56653- if (!par->ypan)
56654- info->fbops->fb_pan_display = NULL;
56655+ if (!par->ypan) {
56656+ pax_open_kernel();
56657+ *(void **)&info->fbops->fb_pan_display = NULL;
56658+ pax_close_kernel();
56659+ }
56660 }
56661
56662 static void uvesafb_init_mtrr(struct fb_info *info)
56663@@ -1786,6 +1816,11 @@ out_mode:
56664 out:
56665 kfree(par->vbe_modes);
56666
56667+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56668+ if (par->pmi_code)
56669+ module_memfree_exec(par->pmi_code);
56670+#endif
56671+
56672 framebuffer_release(info);
56673 return err;
56674 }
56675@@ -1810,6 +1845,11 @@ static int uvesafb_remove(struct platform_device *dev)
56676 kfree(par->vbe_state_orig);
56677 kfree(par->vbe_state_saved);
56678
56679+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56680+ if (par->pmi_code)
56681+ module_memfree_exec(par->pmi_code);
56682+#endif
56683+
56684 framebuffer_release(info);
56685 }
56686 return 0;
56687diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
56688index d79a0ac..2d0c3d4 100644
56689--- a/drivers/video/fbdev/vesafb.c
56690+++ b/drivers/video/fbdev/vesafb.c
56691@@ -9,6 +9,7 @@
56692 */
56693
56694 #include <linux/module.h>
56695+#include <linux/moduleloader.h>
56696 #include <linux/kernel.h>
56697 #include <linux/errno.h>
56698 #include <linux/string.h>
56699@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
56700 static int vram_total; /* Set total amount of memory */
56701 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
56702 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
56703-static void (*pmi_start)(void) __read_mostly;
56704-static void (*pmi_pal) (void) __read_mostly;
56705+static void (*pmi_start)(void) __read_only;
56706+static void (*pmi_pal) (void) __read_only;
56707 static int depth __read_mostly;
56708 static int vga_compat __read_mostly;
56709 /* --------------------------------------------------------------------- */
56710@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
56711 unsigned int size_remap;
56712 unsigned int size_total;
56713 char *option = NULL;
56714+ void *pmi_code = NULL;
56715
56716 /* ignore error return of fb_get_options */
56717 fb_get_options("vesafb", &option);
56718@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
56719 size_remap = size_total;
56720 vesafb_fix.smem_len = size_remap;
56721
56722-#ifndef __i386__
56723- screen_info.vesapm_seg = 0;
56724-#endif
56725-
56726 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
56727 printk(KERN_WARNING
56728 "vesafb: cannot reserve video memory at 0x%lx\n",
56729@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
56730 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
56731 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
56732
56733+#ifdef __i386__
56734+
56735+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56736+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
56737+ if (!pmi_code)
56738+#elif !defined(CONFIG_PAX_KERNEXEC)
56739+ if (0)
56740+#endif
56741+
56742+#endif
56743+ screen_info.vesapm_seg = 0;
56744+
56745 if (screen_info.vesapm_seg) {
56746- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
56747- screen_info.vesapm_seg,screen_info.vesapm_off);
56748+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
56749+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
56750 }
56751
56752 if (screen_info.vesapm_seg < 0xc000)
56753@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
56754
56755 if (ypan || pmi_setpal) {
56756 unsigned short *pmi_base;
56757+
56758 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
56759- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
56760- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
56761+
56762+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56763+ pax_open_kernel();
56764+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
56765+#else
56766+ pmi_code = pmi_base;
56767+#endif
56768+
56769+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
56770+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
56771+
56772+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56773+ pmi_start = ktva_ktla(pmi_start);
56774+ pmi_pal = ktva_ktla(pmi_pal);
56775+ pax_close_kernel();
56776+#endif
56777+
56778 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
56779 if (pmi_base[3]) {
56780 printk(KERN_INFO "vesafb: pmi: ports = ");
56781@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
56782 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
56783 (ypan ? FBINFO_HWACCEL_YPAN : 0);
56784
56785- if (!ypan)
56786- info->fbops->fb_pan_display = NULL;
56787+ if (!ypan) {
56788+ pax_open_kernel();
56789+ *(void **)&info->fbops->fb_pan_display = NULL;
56790+ pax_close_kernel();
56791+ }
56792
56793 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
56794 err = -ENOMEM;
56795@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
56796 fb_info(info, "%s frame buffer device\n", info->fix.id);
56797 return 0;
56798 err:
56799+
56800+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
56801+ module_memfree_exec(pmi_code);
56802+#endif
56803+
56804 if (info->screen_base)
56805 iounmap(info->screen_base);
56806 framebuffer_release(info);
56807diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
56808index 88714ae..16c2e11 100644
56809--- a/drivers/video/fbdev/via/via_clock.h
56810+++ b/drivers/video/fbdev/via/via_clock.h
56811@@ -56,7 +56,7 @@ struct via_clock {
56812
56813 void (*set_engine_pll_state)(u8 state);
56814 void (*set_engine_pll)(struct via_pll_config config);
56815-};
56816+} __no_const;
56817
56818
56819 static inline u32 get_pll_internal_frequency(u32 ref_freq,
56820diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
56821index 3c14e43..2630570 100644
56822--- a/drivers/video/logo/logo_linux_clut224.ppm
56823+++ b/drivers/video/logo/logo_linux_clut224.ppm
56824@@ -2,1603 +2,1123 @@ P3
56825 # Standard 224-color Linux logo
56826 80 80
56827 255
56828- 0 0 0 0 0 0 0 0 0 0 0 0
56829- 0 0 0 0 0 0 0 0 0 0 0 0
56830- 0 0 0 0 0 0 0 0 0 0 0 0
56831- 0 0 0 0 0 0 0 0 0 0 0 0
56832- 0 0 0 0 0 0 0 0 0 0 0 0
56833- 0 0 0 0 0 0 0 0 0 0 0 0
56834- 0 0 0 0 0 0 0 0 0 0 0 0
56835- 0 0 0 0 0 0 0 0 0 0 0 0
56836- 0 0 0 0 0 0 0 0 0 0 0 0
56837- 6 6 6 6 6 6 10 10 10 10 10 10
56838- 10 10 10 6 6 6 6 6 6 6 6 6
56839- 0 0 0 0 0 0 0 0 0 0 0 0
56840- 0 0 0 0 0 0 0 0 0 0 0 0
56841- 0 0 0 0 0 0 0 0 0 0 0 0
56842- 0 0 0 0 0 0 0 0 0 0 0 0
56843- 0 0 0 0 0 0 0 0 0 0 0 0
56844- 0 0 0 0 0 0 0 0 0 0 0 0
56845- 0 0 0 0 0 0 0 0 0 0 0 0
56846- 0 0 0 0 0 0 0 0 0 0 0 0
56847- 0 0 0 0 0 0 0 0 0 0 0 0
56848- 0 0 0 0 0 0 0 0 0 0 0 0
56849- 0 0 0 0 0 0 0 0 0 0 0 0
56850- 0 0 0 0 0 0 0 0 0 0 0 0
56851- 0 0 0 0 0 0 0 0 0 0 0 0
56852- 0 0 0 0 0 0 0 0 0 0 0 0
56853- 0 0 0 0 0 0 0 0 0 0 0 0
56854- 0 0 0 0 0 0 0 0 0 0 0 0
56855- 0 0 0 0 0 0 0 0 0 0 0 0
56856- 0 0 0 6 6 6 10 10 10 14 14 14
56857- 22 22 22 26 26 26 30 30 30 34 34 34
56858- 30 30 30 30 30 30 26 26 26 18 18 18
56859- 14 14 14 10 10 10 6 6 6 0 0 0
56860- 0 0 0 0 0 0 0 0 0 0 0 0
56861- 0 0 0 0 0 0 0 0 0 0 0 0
56862- 0 0 0 0 0 0 0 0 0 0 0 0
56863- 0 0 0 0 0 0 0 0 0 0 0 0
56864- 0 0 0 0 0 0 0 0 0 0 0 0
56865- 0 0 0 0 0 0 0 0 0 0 0 0
56866- 0 0 0 0 0 0 0 0 0 0 0 0
56867- 0 0 0 0 0 0 0 0 0 0 0 0
56868- 0 0 0 0 0 0 0 0 0 0 0 0
56869- 0 0 0 0 0 1 0 0 1 0 0 0
56870- 0 0 0 0 0 0 0 0 0 0 0 0
56871- 0 0 0 0 0 0 0 0 0 0 0 0
56872- 0 0 0 0 0 0 0 0 0 0 0 0
56873- 0 0 0 0 0 0 0 0 0 0 0 0
56874- 0 0 0 0 0 0 0 0 0 0 0 0
56875- 0 0 0 0 0 0 0 0 0 0 0 0
56876- 6 6 6 14 14 14 26 26 26 42 42 42
56877- 54 54 54 66 66 66 78 78 78 78 78 78
56878- 78 78 78 74 74 74 66 66 66 54 54 54
56879- 42 42 42 26 26 26 18 18 18 10 10 10
56880- 6 6 6 0 0 0 0 0 0 0 0 0
56881- 0 0 0 0 0 0 0 0 0 0 0 0
56882- 0 0 0 0 0 0 0 0 0 0 0 0
56883- 0 0 0 0 0 0 0 0 0 0 0 0
56884- 0 0 0 0 0 0 0 0 0 0 0 0
56885- 0 0 0 0 0 0 0 0 0 0 0 0
56886- 0 0 0 0 0 0 0 0 0 0 0 0
56887- 0 0 0 0 0 0 0 0 0 0 0 0
56888- 0 0 0 0 0 0 0 0 0 0 0 0
56889- 0 0 1 0 0 0 0 0 0 0 0 0
56890- 0 0 0 0 0 0 0 0 0 0 0 0
56891- 0 0 0 0 0 0 0 0 0 0 0 0
56892- 0 0 0 0 0 0 0 0 0 0 0 0
56893- 0 0 0 0 0 0 0 0 0 0 0 0
56894- 0 0 0 0 0 0 0 0 0 0 0 0
56895- 0 0 0 0 0 0 0 0 0 10 10 10
56896- 22 22 22 42 42 42 66 66 66 86 86 86
56897- 66 66 66 38 38 38 38 38 38 22 22 22
56898- 26 26 26 34 34 34 54 54 54 66 66 66
56899- 86 86 86 70 70 70 46 46 46 26 26 26
56900- 14 14 14 6 6 6 0 0 0 0 0 0
56901- 0 0 0 0 0 0 0 0 0 0 0 0
56902- 0 0 0 0 0 0 0 0 0 0 0 0
56903- 0 0 0 0 0 0 0 0 0 0 0 0
56904- 0 0 0 0 0 0 0 0 0 0 0 0
56905- 0 0 0 0 0 0 0 0 0 0 0 0
56906- 0 0 0 0 0 0 0 0 0 0 0 0
56907- 0 0 0 0 0 0 0 0 0 0 0 0
56908- 0 0 0 0 0 0 0 0 0 0 0 0
56909- 0 0 1 0 0 1 0 0 1 0 0 0
56910- 0 0 0 0 0 0 0 0 0 0 0 0
56911- 0 0 0 0 0 0 0 0 0 0 0 0
56912- 0 0 0 0 0 0 0 0 0 0 0 0
56913- 0 0 0 0 0 0 0 0 0 0 0 0
56914- 0 0 0 0 0 0 0 0 0 0 0 0
56915- 0 0 0 0 0 0 10 10 10 26 26 26
56916- 50 50 50 82 82 82 58 58 58 6 6 6
56917- 2 2 6 2 2 6 2 2 6 2 2 6
56918- 2 2 6 2 2 6 2 2 6 2 2 6
56919- 6 6 6 54 54 54 86 86 86 66 66 66
56920- 38 38 38 18 18 18 6 6 6 0 0 0
56921- 0 0 0 0 0 0 0 0 0 0 0 0
56922- 0 0 0 0 0 0 0 0 0 0 0 0
56923- 0 0 0 0 0 0 0 0 0 0 0 0
56924- 0 0 0 0 0 0 0 0 0 0 0 0
56925- 0 0 0 0 0 0 0 0 0 0 0 0
56926- 0 0 0 0 0 0 0 0 0 0 0 0
56927- 0 0 0 0 0 0 0 0 0 0 0 0
56928- 0 0 0 0 0 0 0 0 0 0 0 0
56929- 0 0 0 0 0 0 0 0 0 0 0 0
56930- 0 0 0 0 0 0 0 0 0 0 0 0
56931- 0 0 0 0 0 0 0 0 0 0 0 0
56932- 0 0 0 0 0 0 0 0 0 0 0 0
56933- 0 0 0 0 0 0 0 0 0 0 0 0
56934- 0 0 0 0 0 0 0 0 0 0 0 0
56935- 0 0 0 6 6 6 22 22 22 50 50 50
56936- 78 78 78 34 34 34 2 2 6 2 2 6
56937- 2 2 6 2 2 6 2 2 6 2 2 6
56938- 2 2 6 2 2 6 2 2 6 2 2 6
56939- 2 2 6 2 2 6 6 6 6 70 70 70
56940- 78 78 78 46 46 46 22 22 22 6 6 6
56941- 0 0 0 0 0 0 0 0 0 0 0 0
56942- 0 0 0 0 0 0 0 0 0 0 0 0
56943- 0 0 0 0 0 0 0 0 0 0 0 0
56944- 0 0 0 0 0 0 0 0 0 0 0 0
56945- 0 0 0 0 0 0 0 0 0 0 0 0
56946- 0 0 0 0 0 0 0 0 0 0 0 0
56947- 0 0 0 0 0 0 0 0 0 0 0 0
56948- 0 0 0 0 0 0 0 0 0 0 0 0
56949- 0 0 1 0 0 1 0 0 1 0 0 0
56950- 0 0 0 0 0 0 0 0 0 0 0 0
56951- 0 0 0 0 0 0 0 0 0 0 0 0
56952- 0 0 0 0 0 0 0 0 0 0 0 0
56953- 0 0 0 0 0 0 0 0 0 0 0 0
56954- 0 0 0 0 0 0 0 0 0 0 0 0
56955- 6 6 6 18 18 18 42 42 42 82 82 82
56956- 26 26 26 2 2 6 2 2 6 2 2 6
56957- 2 2 6 2 2 6 2 2 6 2 2 6
56958- 2 2 6 2 2 6 2 2 6 14 14 14
56959- 46 46 46 34 34 34 6 6 6 2 2 6
56960- 42 42 42 78 78 78 42 42 42 18 18 18
56961- 6 6 6 0 0 0 0 0 0 0 0 0
56962- 0 0 0 0 0 0 0 0 0 0 0 0
56963- 0 0 0 0 0 0 0 0 0 0 0 0
56964- 0 0 0 0 0 0 0 0 0 0 0 0
56965- 0 0 0 0 0 0 0 0 0 0 0 0
56966- 0 0 0 0 0 0 0 0 0 0 0 0
56967- 0 0 0 0 0 0 0 0 0 0 0 0
56968- 0 0 0 0 0 0 0 0 0 0 0 0
56969- 0 0 1 0 0 0 0 0 1 0 0 0
56970- 0 0 0 0 0 0 0 0 0 0 0 0
56971- 0 0 0 0 0 0 0 0 0 0 0 0
56972- 0 0 0 0 0 0 0 0 0 0 0 0
56973- 0 0 0 0 0 0 0 0 0 0 0 0
56974- 0 0 0 0 0 0 0 0 0 0 0 0
56975- 10 10 10 30 30 30 66 66 66 58 58 58
56976- 2 2 6 2 2 6 2 2 6 2 2 6
56977- 2 2 6 2 2 6 2 2 6 2 2 6
56978- 2 2 6 2 2 6 2 2 6 26 26 26
56979- 86 86 86 101 101 101 46 46 46 10 10 10
56980- 2 2 6 58 58 58 70 70 70 34 34 34
56981- 10 10 10 0 0 0 0 0 0 0 0 0
56982- 0 0 0 0 0 0 0 0 0 0 0 0
56983- 0 0 0 0 0 0 0 0 0 0 0 0
56984- 0 0 0 0 0 0 0 0 0 0 0 0
56985- 0 0 0 0 0 0 0 0 0 0 0 0
56986- 0 0 0 0 0 0 0 0 0 0 0 0
56987- 0 0 0 0 0 0 0 0 0 0 0 0
56988- 0 0 0 0 0 0 0 0 0 0 0 0
56989- 0 0 1 0 0 1 0 0 1 0 0 0
56990- 0 0 0 0 0 0 0 0 0 0 0 0
56991- 0 0 0 0 0 0 0 0 0 0 0 0
56992- 0 0 0 0 0 0 0 0 0 0 0 0
56993- 0 0 0 0 0 0 0 0 0 0 0 0
56994- 0 0 0 0 0 0 0 0 0 0 0 0
56995- 14 14 14 42 42 42 86 86 86 10 10 10
56996- 2 2 6 2 2 6 2 2 6 2 2 6
56997- 2 2 6 2 2 6 2 2 6 2 2 6
56998- 2 2 6 2 2 6 2 2 6 30 30 30
56999- 94 94 94 94 94 94 58 58 58 26 26 26
57000- 2 2 6 6 6 6 78 78 78 54 54 54
57001- 22 22 22 6 6 6 0 0 0 0 0 0
57002- 0 0 0 0 0 0 0 0 0 0 0 0
57003- 0 0 0 0 0 0 0 0 0 0 0 0
57004- 0 0 0 0 0 0 0 0 0 0 0 0
57005- 0 0 0 0 0 0 0 0 0 0 0 0
57006- 0 0 0 0 0 0 0 0 0 0 0 0
57007- 0 0 0 0 0 0 0 0 0 0 0 0
57008- 0 0 0 0 0 0 0 0 0 0 0 0
57009- 0 0 0 0 0 0 0 0 0 0 0 0
57010- 0 0 0 0 0 0 0 0 0 0 0 0
57011- 0 0 0 0 0 0 0 0 0 0 0 0
57012- 0 0 0 0 0 0 0 0 0 0 0 0
57013- 0 0 0 0 0 0 0 0 0 0 0 0
57014- 0 0 0 0 0 0 0 0 0 6 6 6
57015- 22 22 22 62 62 62 62 62 62 2 2 6
57016- 2 2 6 2 2 6 2 2 6 2 2 6
57017- 2 2 6 2 2 6 2 2 6 2 2 6
57018- 2 2 6 2 2 6 2 2 6 26 26 26
57019- 54 54 54 38 38 38 18 18 18 10 10 10
57020- 2 2 6 2 2 6 34 34 34 82 82 82
57021- 38 38 38 14 14 14 0 0 0 0 0 0
57022- 0 0 0 0 0 0 0 0 0 0 0 0
57023- 0 0 0 0 0 0 0 0 0 0 0 0
57024- 0 0 0 0 0 0 0 0 0 0 0 0
57025- 0 0 0 0 0 0 0 0 0 0 0 0
57026- 0 0 0 0 0 0 0 0 0 0 0 0
57027- 0 0 0 0 0 0 0 0 0 0 0 0
57028- 0 0 0 0 0 0 0 0 0 0 0 0
57029- 0 0 0 0 0 1 0 0 1 0 0 0
57030- 0 0 0 0 0 0 0 0 0 0 0 0
57031- 0 0 0 0 0 0 0 0 0 0 0 0
57032- 0 0 0 0 0 0 0 0 0 0 0 0
57033- 0 0 0 0 0 0 0 0 0 0 0 0
57034- 0 0 0 0 0 0 0 0 0 6 6 6
57035- 30 30 30 78 78 78 30 30 30 2 2 6
57036- 2 2 6 2 2 6 2 2 6 2 2 6
57037- 2 2 6 2 2 6 2 2 6 2 2 6
57038- 2 2 6 2 2 6 2 2 6 10 10 10
57039- 10 10 10 2 2 6 2 2 6 2 2 6
57040- 2 2 6 2 2 6 2 2 6 78 78 78
57041- 50 50 50 18 18 18 6 6 6 0 0 0
57042- 0 0 0 0 0 0 0 0 0 0 0 0
57043- 0 0 0 0 0 0 0 0 0 0 0 0
57044- 0 0 0 0 0 0 0 0 0 0 0 0
57045- 0 0 0 0 0 0 0 0 0 0 0 0
57046- 0 0 0 0 0 0 0 0 0 0 0 0
57047- 0 0 0 0 0 0 0 0 0 0 0 0
57048- 0 0 0 0 0 0 0 0 0 0 0 0
57049- 0 0 1 0 0 0 0 0 0 0 0 0
57050- 0 0 0 0 0 0 0 0 0 0 0 0
57051- 0 0 0 0 0 0 0 0 0 0 0 0
57052- 0 0 0 0 0 0 0 0 0 0 0 0
57053- 0 0 0 0 0 0 0 0 0 0 0 0
57054- 0 0 0 0 0 0 0 0 0 10 10 10
57055- 38 38 38 86 86 86 14 14 14 2 2 6
57056- 2 2 6 2 2 6 2 2 6 2 2 6
57057- 2 2 6 2 2 6 2 2 6 2 2 6
57058- 2 2 6 2 2 6 2 2 6 2 2 6
57059- 2 2 6 2 2 6 2 2 6 2 2 6
57060- 2 2 6 2 2 6 2 2 6 54 54 54
57061- 66 66 66 26 26 26 6 6 6 0 0 0
57062- 0 0 0 0 0 0 0 0 0 0 0 0
57063- 0 0 0 0 0 0 0 0 0 0 0 0
57064- 0 0 0 0 0 0 0 0 0 0 0 0
57065- 0 0 0 0 0 0 0 0 0 0 0 0
57066- 0 0 0 0 0 0 0 0 0 0 0 0
57067- 0 0 0 0 0 0 0 0 0 0 0 0
57068- 0 0 0 0 0 0 0 0 0 0 0 0
57069- 0 0 0 0 0 1 0 0 1 0 0 0
57070- 0 0 0 0 0 0 0 0 0 0 0 0
57071- 0 0 0 0 0 0 0 0 0 0 0 0
57072- 0 0 0 0 0 0 0 0 0 0 0 0
57073- 0 0 0 0 0 0 0 0 0 0 0 0
57074- 0 0 0 0 0 0 0 0 0 14 14 14
57075- 42 42 42 82 82 82 2 2 6 2 2 6
57076- 2 2 6 6 6 6 10 10 10 2 2 6
57077- 2 2 6 2 2 6 2 2 6 2 2 6
57078- 2 2 6 2 2 6 2 2 6 6 6 6
57079- 14 14 14 10 10 10 2 2 6 2 2 6
57080- 2 2 6 2 2 6 2 2 6 18 18 18
57081- 82 82 82 34 34 34 10 10 10 0 0 0
57082- 0 0 0 0 0 0 0 0 0 0 0 0
57083- 0 0 0 0 0 0 0 0 0 0 0 0
57084- 0 0 0 0 0 0 0 0 0 0 0 0
57085- 0 0 0 0 0 0 0 0 0 0 0 0
57086- 0 0 0 0 0 0 0 0 0 0 0 0
57087- 0 0 0 0 0 0 0 0 0 0 0 0
57088- 0 0 0 0 0 0 0 0 0 0 0 0
57089- 0 0 1 0 0 0 0 0 0 0 0 0
57090- 0 0 0 0 0 0 0 0 0 0 0 0
57091- 0 0 0 0 0 0 0 0 0 0 0 0
57092- 0 0 0 0 0 0 0 0 0 0 0 0
57093- 0 0 0 0 0 0 0 0 0 0 0 0
57094- 0 0 0 0 0 0 0 0 0 14 14 14
57095- 46 46 46 86 86 86 2 2 6 2 2 6
57096- 6 6 6 6 6 6 22 22 22 34 34 34
57097- 6 6 6 2 2 6 2 2 6 2 2 6
57098- 2 2 6 2 2 6 18 18 18 34 34 34
57099- 10 10 10 50 50 50 22 22 22 2 2 6
57100- 2 2 6 2 2 6 2 2 6 10 10 10
57101- 86 86 86 42 42 42 14 14 14 0 0 0
57102- 0 0 0 0 0 0 0 0 0 0 0 0
57103- 0 0 0 0 0 0 0 0 0 0 0 0
57104- 0 0 0 0 0 0 0 0 0 0 0 0
57105- 0 0 0 0 0 0 0 0 0 0 0 0
57106- 0 0 0 0 0 0 0 0 0 0 0 0
57107- 0 0 0 0 0 0 0 0 0 0 0 0
57108- 0 0 0 0 0 0 0 0 0 0 0 0
57109- 0 0 1 0 0 1 0 0 1 0 0 0
57110- 0 0 0 0 0 0 0 0 0 0 0 0
57111- 0 0 0 0 0 0 0 0 0 0 0 0
57112- 0 0 0 0 0 0 0 0 0 0 0 0
57113- 0 0 0 0 0 0 0 0 0 0 0 0
57114- 0 0 0 0 0 0 0 0 0 14 14 14
57115- 46 46 46 86 86 86 2 2 6 2 2 6
57116- 38 38 38 116 116 116 94 94 94 22 22 22
57117- 22 22 22 2 2 6 2 2 6 2 2 6
57118- 14 14 14 86 86 86 138 138 138 162 162 162
57119-154 154 154 38 38 38 26 26 26 6 6 6
57120- 2 2 6 2 2 6 2 2 6 2 2 6
57121- 86 86 86 46 46 46 14 14 14 0 0 0
57122- 0 0 0 0 0 0 0 0 0 0 0 0
57123- 0 0 0 0 0 0 0 0 0 0 0 0
57124- 0 0 0 0 0 0 0 0 0 0 0 0
57125- 0 0 0 0 0 0 0 0 0 0 0 0
57126- 0 0 0 0 0 0 0 0 0 0 0 0
57127- 0 0 0 0 0 0 0 0 0 0 0 0
57128- 0 0 0 0 0 0 0 0 0 0 0 0
57129- 0 0 0 0 0 0 0 0 0 0 0 0
57130- 0 0 0 0 0 0 0 0 0 0 0 0
57131- 0 0 0 0 0 0 0 0 0 0 0 0
57132- 0 0 0 0 0 0 0 0 0 0 0 0
57133- 0 0 0 0 0 0 0 0 0 0 0 0
57134- 0 0 0 0 0 0 0 0 0 14 14 14
57135- 46 46 46 86 86 86 2 2 6 14 14 14
57136-134 134 134 198 198 198 195 195 195 116 116 116
57137- 10 10 10 2 2 6 2 2 6 6 6 6
57138-101 98 89 187 187 187 210 210 210 218 218 218
57139-214 214 214 134 134 134 14 14 14 6 6 6
57140- 2 2 6 2 2 6 2 2 6 2 2 6
57141- 86 86 86 50 50 50 18 18 18 6 6 6
57142- 0 0 0 0 0 0 0 0 0 0 0 0
57143- 0 0 0 0 0 0 0 0 0 0 0 0
57144- 0 0 0 0 0 0 0 0 0 0 0 0
57145- 0 0 0 0 0 0 0 0 0 0 0 0
57146- 0 0 0 0 0 0 0 0 0 0 0 0
57147- 0 0 0 0 0 0 0 0 0 0 0 0
57148- 0 0 0 0 0 0 0 0 1 0 0 0
57149- 0 0 1 0 0 1 0 0 1 0 0 0
57150- 0 0 0 0 0 0 0 0 0 0 0 0
57151- 0 0 0 0 0 0 0 0 0 0 0 0
57152- 0 0 0 0 0 0 0 0 0 0 0 0
57153- 0 0 0 0 0 0 0 0 0 0 0 0
57154- 0 0 0 0 0 0 0 0 0 14 14 14
57155- 46 46 46 86 86 86 2 2 6 54 54 54
57156-218 218 218 195 195 195 226 226 226 246 246 246
57157- 58 58 58 2 2 6 2 2 6 30 30 30
57158-210 210 210 253 253 253 174 174 174 123 123 123
57159-221 221 221 234 234 234 74 74 74 2 2 6
57160- 2 2 6 2 2 6 2 2 6 2 2 6
57161- 70 70 70 58 58 58 22 22 22 6 6 6
57162- 0 0 0 0 0 0 0 0 0 0 0 0
57163- 0 0 0 0 0 0 0 0 0 0 0 0
57164- 0 0 0 0 0 0 0 0 0 0 0 0
57165- 0 0 0 0 0 0 0 0 0 0 0 0
57166- 0 0 0 0 0 0 0 0 0 0 0 0
57167- 0 0 0 0 0 0 0 0 0 0 0 0
57168- 0 0 0 0 0 0 0 0 0 0 0 0
57169- 0 0 0 0 0 0 0 0 0 0 0 0
57170- 0 0 0 0 0 0 0 0 0 0 0 0
57171- 0 0 0 0 0 0 0 0 0 0 0 0
57172- 0 0 0 0 0 0 0 0 0 0 0 0
57173- 0 0 0 0 0 0 0 0 0 0 0 0
57174- 0 0 0 0 0 0 0 0 0 14 14 14
57175- 46 46 46 82 82 82 2 2 6 106 106 106
57176-170 170 170 26 26 26 86 86 86 226 226 226
57177-123 123 123 10 10 10 14 14 14 46 46 46
57178-231 231 231 190 190 190 6 6 6 70 70 70
57179- 90 90 90 238 238 238 158 158 158 2 2 6
57180- 2 2 6 2 2 6 2 2 6 2 2 6
57181- 70 70 70 58 58 58 22 22 22 6 6 6
57182- 0 0 0 0 0 0 0 0 0 0 0 0
57183- 0 0 0 0 0 0 0 0 0 0 0 0
57184- 0 0 0 0 0 0 0 0 0 0 0 0
57185- 0 0 0 0 0 0 0 0 0 0 0 0
57186- 0 0 0 0 0 0 0 0 0 0 0 0
57187- 0 0 0 0 0 0 0 0 0 0 0 0
57188- 0 0 0 0 0 0 0 0 1 0 0 0
57189- 0 0 1 0 0 1 0 0 1 0 0 0
57190- 0 0 0 0 0 0 0 0 0 0 0 0
57191- 0 0 0 0 0 0 0 0 0 0 0 0
57192- 0 0 0 0 0 0 0 0 0 0 0 0
57193- 0 0 0 0 0 0 0 0 0 0 0 0
57194- 0 0 0 0 0 0 0 0 0 14 14 14
57195- 42 42 42 86 86 86 6 6 6 116 116 116
57196-106 106 106 6 6 6 70 70 70 149 149 149
57197-128 128 128 18 18 18 38 38 38 54 54 54
57198-221 221 221 106 106 106 2 2 6 14 14 14
57199- 46 46 46 190 190 190 198 198 198 2 2 6
57200- 2 2 6 2 2 6 2 2 6 2 2 6
57201- 74 74 74 62 62 62 22 22 22 6 6 6
57202- 0 0 0 0 0 0 0 0 0 0 0 0
57203- 0 0 0 0 0 0 0 0 0 0 0 0
57204- 0 0 0 0 0 0 0 0 0 0 0 0
57205- 0 0 0 0 0 0 0 0 0 0 0 0
57206- 0 0 0 0 0 0 0 0 0 0 0 0
57207- 0 0 0 0 0 0 0 0 0 0 0 0
57208- 0 0 0 0 0 0 0 0 1 0 0 0
57209- 0 0 1 0 0 0 0 0 1 0 0 0
57210- 0 0 0 0 0 0 0 0 0 0 0 0
57211- 0 0 0 0 0 0 0 0 0 0 0 0
57212- 0 0 0 0 0 0 0 0 0 0 0 0
57213- 0 0 0 0 0 0 0 0 0 0 0 0
57214- 0 0 0 0 0 0 0 0 0 14 14 14
57215- 42 42 42 94 94 94 14 14 14 101 101 101
57216-128 128 128 2 2 6 18 18 18 116 116 116
57217-118 98 46 121 92 8 121 92 8 98 78 10
57218-162 162 162 106 106 106 2 2 6 2 2 6
57219- 2 2 6 195 195 195 195 195 195 6 6 6
57220- 2 2 6 2 2 6 2 2 6 2 2 6
57221- 74 74 74 62 62 62 22 22 22 6 6 6
57222- 0 0 0 0 0 0 0 0 0 0 0 0
57223- 0 0 0 0 0 0 0 0 0 0 0 0
57224- 0 0 0 0 0 0 0 0 0 0 0 0
57225- 0 0 0 0 0 0 0 0 0 0 0 0
57226- 0 0 0 0 0 0 0 0 0 0 0 0
57227- 0 0 0 0 0 0 0 0 0 0 0 0
57228- 0 0 0 0 0 0 0 0 1 0 0 1
57229- 0 0 1 0 0 0 0 0 1 0 0 0
57230- 0 0 0 0 0 0 0 0 0 0 0 0
57231- 0 0 0 0 0 0 0 0 0 0 0 0
57232- 0 0 0 0 0 0 0 0 0 0 0 0
57233- 0 0 0 0 0 0 0 0 0 0 0 0
57234- 0 0 0 0 0 0 0 0 0 10 10 10
57235- 38 38 38 90 90 90 14 14 14 58 58 58
57236-210 210 210 26 26 26 54 38 6 154 114 10
57237-226 170 11 236 186 11 225 175 15 184 144 12
57238-215 174 15 175 146 61 37 26 9 2 2 6
57239- 70 70 70 246 246 246 138 138 138 2 2 6
57240- 2 2 6 2 2 6 2 2 6 2 2 6
57241- 70 70 70 66 66 66 26 26 26 6 6 6
57242- 0 0 0 0 0 0 0 0 0 0 0 0
57243- 0 0 0 0 0 0 0 0 0 0 0 0
57244- 0 0 0 0 0 0 0 0 0 0 0 0
57245- 0 0 0 0 0 0 0 0 0 0 0 0
57246- 0 0 0 0 0 0 0 0 0 0 0 0
57247- 0 0 0 0 0 0 0 0 0 0 0 0
57248- 0 0 0 0 0 0 0 0 0 0 0 0
57249- 0 0 0 0 0 0 0 0 0 0 0 0
57250- 0 0 0 0 0 0 0 0 0 0 0 0
57251- 0 0 0 0 0 0 0 0 0 0 0 0
57252- 0 0 0 0 0 0 0 0 0 0 0 0
57253- 0 0 0 0 0 0 0 0 0 0 0 0
57254- 0 0 0 0 0 0 0 0 0 10 10 10
57255- 38 38 38 86 86 86 14 14 14 10 10 10
57256-195 195 195 188 164 115 192 133 9 225 175 15
57257-239 182 13 234 190 10 232 195 16 232 200 30
57258-245 207 45 241 208 19 232 195 16 184 144 12
57259-218 194 134 211 206 186 42 42 42 2 2 6
57260- 2 2 6 2 2 6 2 2 6 2 2 6
57261- 50 50 50 74 74 74 30 30 30 6 6 6
57262- 0 0 0 0 0 0 0 0 0 0 0 0
57263- 0 0 0 0 0 0 0 0 0 0 0 0
57264- 0 0 0 0 0 0 0 0 0 0 0 0
57265- 0 0 0 0 0 0 0 0 0 0 0 0
57266- 0 0 0 0 0 0 0 0 0 0 0 0
57267- 0 0 0 0 0 0 0 0 0 0 0 0
57268- 0 0 0 0 0 0 0 0 0 0 0 0
57269- 0 0 0 0 0 0 0 0 0 0 0 0
57270- 0 0 0 0 0 0 0 0 0 0 0 0
57271- 0 0 0 0 0 0 0 0 0 0 0 0
57272- 0 0 0 0 0 0 0 0 0 0 0 0
57273- 0 0 0 0 0 0 0 0 0 0 0 0
57274- 0 0 0 0 0 0 0 0 0 10 10 10
57275- 34 34 34 86 86 86 14 14 14 2 2 6
57276-121 87 25 192 133 9 219 162 10 239 182 13
57277-236 186 11 232 195 16 241 208 19 244 214 54
57278-246 218 60 246 218 38 246 215 20 241 208 19
57279-241 208 19 226 184 13 121 87 25 2 2 6
57280- 2 2 6 2 2 6 2 2 6 2 2 6
57281- 50 50 50 82 82 82 34 34 34 10 10 10
57282- 0 0 0 0 0 0 0 0 0 0 0 0
57283- 0 0 0 0 0 0 0 0 0 0 0 0
57284- 0 0 0 0 0 0 0 0 0 0 0 0
57285- 0 0 0 0 0 0 0 0 0 0 0 0
57286- 0 0 0 0 0 0 0 0 0 0 0 0
57287- 0 0 0 0 0 0 0 0 0 0 0 0
57288- 0 0 0 0 0 0 0 0 0 0 0 0
57289- 0 0 0 0 0 0 0 0 0 0 0 0
57290- 0 0 0 0 0 0 0 0 0 0 0 0
57291- 0 0 0 0 0 0 0 0 0 0 0 0
57292- 0 0 0 0 0 0 0 0 0 0 0 0
57293- 0 0 0 0 0 0 0 0 0 0 0 0
57294- 0 0 0 0 0 0 0 0 0 10 10 10
57295- 34 34 34 82 82 82 30 30 30 61 42 6
57296-180 123 7 206 145 10 230 174 11 239 182 13
57297-234 190 10 238 202 15 241 208 19 246 218 74
57298-246 218 38 246 215 20 246 215 20 246 215 20
57299-226 184 13 215 174 15 184 144 12 6 6 6
57300- 2 2 6 2 2 6 2 2 6 2 2 6
57301- 26 26 26 94 94 94 42 42 42 14 14 14
57302- 0 0 0 0 0 0 0 0 0 0 0 0
57303- 0 0 0 0 0 0 0 0 0 0 0 0
57304- 0 0 0 0 0 0 0 0 0 0 0 0
57305- 0 0 0 0 0 0 0 0 0 0 0 0
57306- 0 0 0 0 0 0 0 0 0 0 0 0
57307- 0 0 0 0 0 0 0 0 0 0 0 0
57308- 0 0 0 0 0 0 0 0 0 0 0 0
57309- 0 0 0 0 0 0 0 0 0 0 0 0
57310- 0 0 0 0 0 0 0 0 0 0 0 0
57311- 0 0 0 0 0 0 0 0 0 0 0 0
57312- 0 0 0 0 0 0 0 0 0 0 0 0
57313- 0 0 0 0 0 0 0 0 0 0 0 0
57314- 0 0 0 0 0 0 0 0 0 10 10 10
57315- 30 30 30 78 78 78 50 50 50 104 69 6
57316-192 133 9 216 158 10 236 178 12 236 186 11
57317-232 195 16 241 208 19 244 214 54 245 215 43
57318-246 215 20 246 215 20 241 208 19 198 155 10
57319-200 144 11 216 158 10 156 118 10 2 2 6
57320- 2 2 6 2 2 6 2 2 6 2 2 6
57321- 6 6 6 90 90 90 54 54 54 18 18 18
57322- 6 6 6 0 0 0 0 0 0 0 0 0
57323- 0 0 0 0 0 0 0 0 0 0 0 0
57324- 0 0 0 0 0 0 0 0 0 0 0 0
57325- 0 0 0 0 0 0 0 0 0 0 0 0
57326- 0 0 0 0 0 0 0 0 0 0 0 0
57327- 0 0 0 0 0 0 0 0 0 0 0 0
57328- 0 0 0 0 0 0 0 0 0 0 0 0
57329- 0 0 0 0 0 0 0 0 0 0 0 0
57330- 0 0 0 0 0 0 0 0 0 0 0 0
57331- 0 0 0 0 0 0 0 0 0 0 0 0
57332- 0 0 0 0 0 0 0 0 0 0 0 0
57333- 0 0 0 0 0 0 0 0 0 0 0 0
57334- 0 0 0 0 0 0 0 0 0 10 10 10
57335- 30 30 30 78 78 78 46 46 46 22 22 22
57336-137 92 6 210 162 10 239 182 13 238 190 10
57337-238 202 15 241 208 19 246 215 20 246 215 20
57338-241 208 19 203 166 17 185 133 11 210 150 10
57339-216 158 10 210 150 10 102 78 10 2 2 6
57340- 6 6 6 54 54 54 14 14 14 2 2 6
57341- 2 2 6 62 62 62 74 74 74 30 30 30
57342- 10 10 10 0 0 0 0 0 0 0 0 0
57343- 0 0 0 0 0 0 0 0 0 0 0 0
57344- 0 0 0 0 0 0 0 0 0 0 0 0
57345- 0 0 0 0 0 0 0 0 0 0 0 0
57346- 0 0 0 0 0 0 0 0 0 0 0 0
57347- 0 0 0 0 0 0 0 0 0 0 0 0
57348- 0 0 0 0 0 0 0 0 0 0 0 0
57349- 0 0 0 0 0 0 0 0 0 0 0 0
57350- 0 0 0 0 0 0 0 0 0 0 0 0
57351- 0 0 0 0 0 0 0 0 0 0 0 0
57352- 0 0 0 0 0 0 0 0 0 0 0 0
57353- 0 0 0 0 0 0 0 0 0 0 0 0
57354- 0 0 0 0 0 0 0 0 0 10 10 10
57355- 34 34 34 78 78 78 50 50 50 6 6 6
57356- 94 70 30 139 102 15 190 146 13 226 184 13
57357-232 200 30 232 195 16 215 174 15 190 146 13
57358-168 122 10 192 133 9 210 150 10 213 154 11
57359-202 150 34 182 157 106 101 98 89 2 2 6
57360- 2 2 6 78 78 78 116 116 116 58 58 58
57361- 2 2 6 22 22 22 90 90 90 46 46 46
57362- 18 18 18 6 6 6 0 0 0 0 0 0
57363- 0 0 0 0 0 0 0 0 0 0 0 0
57364- 0 0 0 0 0 0 0 0 0 0 0 0
57365- 0 0 0 0 0 0 0 0 0 0 0 0
57366- 0 0 0 0 0 0 0 0 0 0 0 0
57367- 0 0 0 0 0 0 0 0 0 0 0 0
57368- 0 0 0 0 0 0 0 0 0 0 0 0
57369- 0 0 0 0 0 0 0 0 0 0 0 0
57370- 0 0 0 0 0 0 0 0 0 0 0 0
57371- 0 0 0 0 0 0 0 0 0 0 0 0
57372- 0 0 0 0 0 0 0 0 0 0 0 0
57373- 0 0 0 0 0 0 0 0 0 0 0 0
57374- 0 0 0 0 0 0 0 0 0 10 10 10
57375- 38 38 38 86 86 86 50 50 50 6 6 6
57376-128 128 128 174 154 114 156 107 11 168 122 10
57377-198 155 10 184 144 12 197 138 11 200 144 11
57378-206 145 10 206 145 10 197 138 11 188 164 115
57379-195 195 195 198 198 198 174 174 174 14 14 14
57380- 2 2 6 22 22 22 116 116 116 116 116 116
57381- 22 22 22 2 2 6 74 74 74 70 70 70
57382- 30 30 30 10 10 10 0 0 0 0 0 0
57383- 0 0 0 0 0 0 0 0 0 0 0 0
57384- 0 0 0 0 0 0 0 0 0 0 0 0
57385- 0 0 0 0 0 0 0 0 0 0 0 0
57386- 0 0 0 0 0 0 0 0 0 0 0 0
57387- 0 0 0 0 0 0 0 0 0 0 0 0
57388- 0 0 0 0 0 0 0 0 0 0 0 0
57389- 0 0 0 0 0 0 0 0 0 0 0 0
57390- 0 0 0 0 0 0 0 0 0 0 0 0
57391- 0 0 0 0 0 0 0 0 0 0 0 0
57392- 0 0 0 0 0 0 0 0 0 0 0 0
57393- 0 0 0 0 0 0 0 0 0 0 0 0
57394- 0 0 0 0 0 0 6 6 6 18 18 18
57395- 50 50 50 101 101 101 26 26 26 10 10 10
57396-138 138 138 190 190 190 174 154 114 156 107 11
57397-197 138 11 200 144 11 197 138 11 192 133 9
57398-180 123 7 190 142 34 190 178 144 187 187 187
57399-202 202 202 221 221 221 214 214 214 66 66 66
57400- 2 2 6 2 2 6 50 50 50 62 62 62
57401- 6 6 6 2 2 6 10 10 10 90 90 90
57402- 50 50 50 18 18 18 6 6 6 0 0 0
57403- 0 0 0 0 0 0 0 0 0 0 0 0
57404- 0 0 0 0 0 0 0 0 0 0 0 0
57405- 0 0 0 0 0 0 0 0 0 0 0 0
57406- 0 0 0 0 0 0 0 0 0 0 0 0
57407- 0 0 0 0 0 0 0 0 0 0 0 0
57408- 0 0 0 0 0 0 0 0 0 0 0 0
57409- 0 0 0 0 0 0 0 0 0 0 0 0
57410- 0 0 0 0 0 0 0 0 0 0 0 0
57411- 0 0 0 0 0 0 0 0 0 0 0 0
57412- 0 0 0 0 0 0 0 0 0 0 0 0
57413- 0 0 0 0 0 0 0 0 0 0 0 0
57414- 0 0 0 0 0 0 10 10 10 34 34 34
57415- 74 74 74 74 74 74 2 2 6 6 6 6
57416-144 144 144 198 198 198 190 190 190 178 166 146
57417-154 121 60 156 107 11 156 107 11 168 124 44
57418-174 154 114 187 187 187 190 190 190 210 210 210
57419-246 246 246 253 253 253 253 253 253 182 182 182
57420- 6 6 6 2 2 6 2 2 6 2 2 6
57421- 2 2 6 2 2 6 2 2 6 62 62 62
57422- 74 74 74 34 34 34 14 14 14 0 0 0
57423- 0 0 0 0 0 0 0 0 0 0 0 0
57424- 0 0 0 0 0 0 0 0 0 0 0 0
57425- 0 0 0 0 0 0 0 0 0 0 0 0
57426- 0 0 0 0 0 0 0 0 0 0 0 0
57427- 0 0 0 0 0 0 0 0 0 0 0 0
57428- 0 0 0 0 0 0 0 0 0 0 0 0
57429- 0 0 0 0 0 0 0 0 0 0 0 0
57430- 0 0 0 0 0 0 0 0 0 0 0 0
57431- 0 0 0 0 0 0 0 0 0 0 0 0
57432- 0 0 0 0 0 0 0 0 0 0 0 0
57433- 0 0 0 0 0 0 0 0 0 0 0 0
57434- 0 0 0 10 10 10 22 22 22 54 54 54
57435- 94 94 94 18 18 18 2 2 6 46 46 46
57436-234 234 234 221 221 221 190 190 190 190 190 190
57437-190 190 190 187 187 187 187 187 187 190 190 190
57438-190 190 190 195 195 195 214 214 214 242 242 242
57439-253 253 253 253 253 253 253 253 253 253 253 253
57440- 82 82 82 2 2 6 2 2 6 2 2 6
57441- 2 2 6 2 2 6 2 2 6 14 14 14
57442- 86 86 86 54 54 54 22 22 22 6 6 6
57443- 0 0 0 0 0 0 0 0 0 0 0 0
57444- 0 0 0 0 0 0 0 0 0 0 0 0
57445- 0 0 0 0 0 0 0 0 0 0 0 0
57446- 0 0 0 0 0 0 0 0 0 0 0 0
57447- 0 0 0 0 0 0 0 0 0 0 0 0
57448- 0 0 0 0 0 0 0 0 0 0 0 0
57449- 0 0 0 0 0 0 0 0 0 0 0 0
57450- 0 0 0 0 0 0 0 0 0 0 0 0
57451- 0 0 0 0 0 0 0 0 0 0 0 0
57452- 0 0 0 0 0 0 0 0 0 0 0 0
57453- 0 0 0 0 0 0 0 0 0 0 0 0
57454- 6 6 6 18 18 18 46 46 46 90 90 90
57455- 46 46 46 18 18 18 6 6 6 182 182 182
57456-253 253 253 246 246 246 206 206 206 190 190 190
57457-190 190 190 190 190 190 190 190 190 190 190 190
57458-206 206 206 231 231 231 250 250 250 253 253 253
57459-253 253 253 253 253 253 253 253 253 253 253 253
57460-202 202 202 14 14 14 2 2 6 2 2 6
57461- 2 2 6 2 2 6 2 2 6 2 2 6
57462- 42 42 42 86 86 86 42 42 42 18 18 18
57463- 6 6 6 0 0 0 0 0 0 0 0 0
57464- 0 0 0 0 0 0 0 0 0 0 0 0
57465- 0 0 0 0 0 0 0 0 0 0 0 0
57466- 0 0 0 0 0 0 0 0 0 0 0 0
57467- 0 0 0 0 0 0 0 0 0 0 0 0
57468- 0 0 0 0 0 0 0 0 0 0 0 0
57469- 0 0 0 0 0 0 0 0 0 0 0 0
57470- 0 0 0 0 0 0 0 0 0 0 0 0
57471- 0 0 0 0 0 0 0 0 0 0 0 0
57472- 0 0 0 0 0 0 0 0 0 0 0 0
57473- 0 0 0 0 0 0 0 0 0 6 6 6
57474- 14 14 14 38 38 38 74 74 74 66 66 66
57475- 2 2 6 6 6 6 90 90 90 250 250 250
57476-253 253 253 253 253 253 238 238 238 198 198 198
57477-190 190 190 190 190 190 195 195 195 221 221 221
57478-246 246 246 253 253 253 253 253 253 253 253 253
57479-253 253 253 253 253 253 253 253 253 253 253 253
57480-253 253 253 82 82 82 2 2 6 2 2 6
57481- 2 2 6 2 2 6 2 2 6 2 2 6
57482- 2 2 6 78 78 78 70 70 70 34 34 34
57483- 14 14 14 6 6 6 0 0 0 0 0 0
57484- 0 0 0 0 0 0 0 0 0 0 0 0
57485- 0 0 0 0 0 0 0 0 0 0 0 0
57486- 0 0 0 0 0 0 0 0 0 0 0 0
57487- 0 0 0 0 0 0 0 0 0 0 0 0
57488- 0 0 0 0 0 0 0 0 0 0 0 0
57489- 0 0 0 0 0 0 0 0 0 0 0 0
57490- 0 0 0 0 0 0 0 0 0 0 0 0
57491- 0 0 0 0 0 0 0 0 0 0 0 0
57492- 0 0 0 0 0 0 0 0 0 0 0 0
57493- 0 0 0 0 0 0 0 0 0 14 14 14
57494- 34 34 34 66 66 66 78 78 78 6 6 6
57495- 2 2 6 18 18 18 218 218 218 253 253 253
57496-253 253 253 253 253 253 253 253 253 246 246 246
57497-226 226 226 231 231 231 246 246 246 253 253 253
57498-253 253 253 253 253 253 253 253 253 253 253 253
57499-253 253 253 253 253 253 253 253 253 253 253 253
57500-253 253 253 178 178 178 2 2 6 2 2 6
57501- 2 2 6 2 2 6 2 2 6 2 2 6
57502- 2 2 6 18 18 18 90 90 90 62 62 62
57503- 30 30 30 10 10 10 0 0 0 0 0 0
57504- 0 0 0 0 0 0 0 0 0 0 0 0
57505- 0 0 0 0 0 0 0 0 0 0 0 0
57506- 0 0 0 0 0 0 0 0 0 0 0 0
57507- 0 0 0 0 0 0 0 0 0 0 0 0
57508- 0 0 0 0 0 0 0 0 0 0 0 0
57509- 0 0 0 0 0 0 0 0 0 0 0 0
57510- 0 0 0 0 0 0 0 0 0 0 0 0
57511- 0 0 0 0 0 0 0 0 0 0 0 0
57512- 0 0 0 0 0 0 0 0 0 0 0 0
57513- 0 0 0 0 0 0 10 10 10 26 26 26
57514- 58 58 58 90 90 90 18 18 18 2 2 6
57515- 2 2 6 110 110 110 253 253 253 253 253 253
57516-253 253 253 253 253 253 253 253 253 253 253 253
57517-250 250 250 253 253 253 253 253 253 253 253 253
57518-253 253 253 253 253 253 253 253 253 253 253 253
57519-253 253 253 253 253 253 253 253 253 253 253 253
57520-253 253 253 231 231 231 18 18 18 2 2 6
57521- 2 2 6 2 2 6 2 2 6 2 2 6
57522- 2 2 6 2 2 6 18 18 18 94 94 94
57523- 54 54 54 26 26 26 10 10 10 0 0 0
57524- 0 0 0 0 0 0 0 0 0 0 0 0
57525- 0 0 0 0 0 0 0 0 0 0 0 0
57526- 0 0 0 0 0 0 0 0 0 0 0 0
57527- 0 0 0 0 0 0 0 0 0 0 0 0
57528- 0 0 0 0 0 0 0 0 0 0 0 0
57529- 0 0 0 0 0 0 0 0 0 0 0 0
57530- 0 0 0 0 0 0 0 0 0 0 0 0
57531- 0 0 0 0 0 0 0 0 0 0 0 0
57532- 0 0 0 0 0 0 0 0 0 0 0 0
57533- 0 0 0 6 6 6 22 22 22 50 50 50
57534- 90 90 90 26 26 26 2 2 6 2 2 6
57535- 14 14 14 195 195 195 250 250 250 253 253 253
57536-253 253 253 253 253 253 253 253 253 253 253 253
57537-253 253 253 253 253 253 253 253 253 253 253 253
57538-253 253 253 253 253 253 253 253 253 253 253 253
57539-253 253 253 253 253 253 253 253 253 253 253 253
57540-250 250 250 242 242 242 54 54 54 2 2 6
57541- 2 2 6 2 2 6 2 2 6 2 2 6
57542- 2 2 6 2 2 6 2 2 6 38 38 38
57543- 86 86 86 50 50 50 22 22 22 6 6 6
57544- 0 0 0 0 0 0 0 0 0 0 0 0
57545- 0 0 0 0 0 0 0 0 0 0 0 0
57546- 0 0 0 0 0 0 0 0 0 0 0 0
57547- 0 0 0 0 0 0 0 0 0 0 0 0
57548- 0 0 0 0 0 0 0 0 0 0 0 0
57549- 0 0 0 0 0 0 0 0 0 0 0 0
57550- 0 0 0 0 0 0 0 0 0 0 0 0
57551- 0 0 0 0 0 0 0 0 0 0 0 0
57552- 0 0 0 0 0 0 0 0 0 0 0 0
57553- 6 6 6 14 14 14 38 38 38 82 82 82
57554- 34 34 34 2 2 6 2 2 6 2 2 6
57555- 42 42 42 195 195 195 246 246 246 253 253 253
57556-253 253 253 253 253 253 253 253 253 250 250 250
57557-242 242 242 242 242 242 250 250 250 253 253 253
57558-253 253 253 253 253 253 253 253 253 253 253 253
57559-253 253 253 250 250 250 246 246 246 238 238 238
57560-226 226 226 231 231 231 101 101 101 6 6 6
57561- 2 2 6 2 2 6 2 2 6 2 2 6
57562- 2 2 6 2 2 6 2 2 6 2 2 6
57563- 38 38 38 82 82 82 42 42 42 14 14 14
57564- 6 6 6 0 0 0 0 0 0 0 0 0
57565- 0 0 0 0 0 0 0 0 0 0 0 0
57566- 0 0 0 0 0 0 0 0 0 0 0 0
57567- 0 0 0 0 0 0 0 0 0 0 0 0
57568- 0 0 0 0 0 0 0 0 0 0 0 0
57569- 0 0 0 0 0 0 0 0 0 0 0 0
57570- 0 0 0 0 0 0 0 0 0 0 0 0
57571- 0 0 0 0 0 0 0 0 0 0 0 0
57572- 0 0 0 0 0 0 0 0 0 0 0 0
57573- 10 10 10 26 26 26 62 62 62 66 66 66
57574- 2 2 6 2 2 6 2 2 6 6 6 6
57575- 70 70 70 170 170 170 206 206 206 234 234 234
57576-246 246 246 250 250 250 250 250 250 238 238 238
57577-226 226 226 231 231 231 238 238 238 250 250 250
57578-250 250 250 250 250 250 246 246 246 231 231 231
57579-214 214 214 206 206 206 202 202 202 202 202 202
57580-198 198 198 202 202 202 182 182 182 18 18 18
57581- 2 2 6 2 2 6 2 2 6 2 2 6
57582- 2 2 6 2 2 6 2 2 6 2 2 6
57583- 2 2 6 62 62 62 66 66 66 30 30 30
57584- 10 10 10 0 0 0 0 0 0 0 0 0
57585- 0 0 0 0 0 0 0 0 0 0 0 0
57586- 0 0 0 0 0 0 0 0 0 0 0 0
57587- 0 0 0 0 0 0 0 0 0 0 0 0
57588- 0 0 0 0 0 0 0 0 0 0 0 0
57589- 0 0 0 0 0 0 0 0 0 0 0 0
57590- 0 0 0 0 0 0 0 0 0 0 0 0
57591- 0 0 0 0 0 0 0 0 0 0 0 0
57592- 0 0 0 0 0 0 0 0 0 0 0 0
57593- 14 14 14 42 42 42 82 82 82 18 18 18
57594- 2 2 6 2 2 6 2 2 6 10 10 10
57595- 94 94 94 182 182 182 218 218 218 242 242 242
57596-250 250 250 253 253 253 253 253 253 250 250 250
57597-234 234 234 253 253 253 253 253 253 253 253 253
57598-253 253 253 253 253 253 253 253 253 246 246 246
57599-238 238 238 226 226 226 210 210 210 202 202 202
57600-195 195 195 195 195 195 210 210 210 158 158 158
57601- 6 6 6 14 14 14 50 50 50 14 14 14
57602- 2 2 6 2 2 6 2 2 6 2 2 6
57603- 2 2 6 6 6 6 86 86 86 46 46 46
57604- 18 18 18 6 6 6 0 0 0 0 0 0
57605- 0 0 0 0 0 0 0 0 0 0 0 0
57606- 0 0 0 0 0 0 0 0 0 0 0 0
57607- 0 0 0 0 0 0 0 0 0 0 0 0
57608- 0 0 0 0 0 0 0 0 0 0 0 0
57609- 0 0 0 0 0 0 0 0 0 0 0 0
57610- 0 0 0 0 0 0 0 0 0 0 0 0
57611- 0 0 0 0 0 0 0 0 0 0 0 0
57612- 0 0 0 0 0 0 0 0 0 6 6 6
57613- 22 22 22 54 54 54 70 70 70 2 2 6
57614- 2 2 6 10 10 10 2 2 6 22 22 22
57615-166 166 166 231 231 231 250 250 250 253 253 253
57616-253 253 253 253 253 253 253 253 253 250 250 250
57617-242 242 242 253 253 253 253 253 253 253 253 253
57618-253 253 253 253 253 253 253 253 253 253 253 253
57619-253 253 253 253 253 253 253 253 253 246 246 246
57620-231 231 231 206 206 206 198 198 198 226 226 226
57621- 94 94 94 2 2 6 6 6 6 38 38 38
57622- 30 30 30 2 2 6 2 2 6 2 2 6
57623- 2 2 6 2 2 6 62 62 62 66 66 66
57624- 26 26 26 10 10 10 0 0 0 0 0 0
57625- 0 0 0 0 0 0 0 0 0 0 0 0
57626- 0 0 0 0 0 0 0 0 0 0 0 0
57627- 0 0 0 0 0 0 0 0 0 0 0 0
57628- 0 0 0 0 0 0 0 0 0 0 0 0
57629- 0 0 0 0 0 0 0 0 0 0 0 0
57630- 0 0 0 0 0 0 0 0 0 0 0 0
57631- 0 0 0 0 0 0 0 0 0 0 0 0
57632- 0 0 0 0 0 0 0 0 0 10 10 10
57633- 30 30 30 74 74 74 50 50 50 2 2 6
57634- 26 26 26 26 26 26 2 2 6 106 106 106
57635-238 238 238 253 253 253 253 253 253 253 253 253
57636-253 253 253 253 253 253 253 253 253 253 253 253
57637-253 253 253 253 253 253 253 253 253 253 253 253
57638-253 253 253 253 253 253 253 253 253 253 253 253
57639-253 253 253 253 253 253 253 253 253 253 253 253
57640-253 253 253 246 246 246 218 218 218 202 202 202
57641-210 210 210 14 14 14 2 2 6 2 2 6
57642- 30 30 30 22 22 22 2 2 6 2 2 6
57643- 2 2 6 2 2 6 18 18 18 86 86 86
57644- 42 42 42 14 14 14 0 0 0 0 0 0
57645- 0 0 0 0 0 0 0 0 0 0 0 0
57646- 0 0 0 0 0 0 0 0 0 0 0 0
57647- 0 0 0 0 0 0 0 0 0 0 0 0
57648- 0 0 0 0 0 0 0 0 0 0 0 0
57649- 0 0 0 0 0 0 0 0 0 0 0 0
57650- 0 0 0 0 0 0 0 0 0 0 0 0
57651- 0 0 0 0 0 0 0 0 0 0 0 0
57652- 0 0 0 0 0 0 0 0 0 14 14 14
57653- 42 42 42 90 90 90 22 22 22 2 2 6
57654- 42 42 42 2 2 6 18 18 18 218 218 218
57655-253 253 253 253 253 253 253 253 253 253 253 253
57656-253 253 253 253 253 253 253 253 253 253 253 253
57657-253 253 253 253 253 253 253 253 253 253 253 253
57658-253 253 253 253 253 253 253 253 253 253 253 253
57659-253 253 253 253 253 253 253 253 253 253 253 253
57660-253 253 253 253 253 253 250 250 250 221 221 221
57661-218 218 218 101 101 101 2 2 6 14 14 14
57662- 18 18 18 38 38 38 10 10 10 2 2 6
57663- 2 2 6 2 2 6 2 2 6 78 78 78
57664- 58 58 58 22 22 22 6 6 6 0 0 0
57665- 0 0 0 0 0 0 0 0 0 0 0 0
57666- 0 0 0 0 0 0 0 0 0 0 0 0
57667- 0 0 0 0 0 0 0 0 0 0 0 0
57668- 0 0 0 0 0 0 0 0 0 0 0 0
57669- 0 0 0 0 0 0 0 0 0 0 0 0
57670- 0 0 0 0 0 0 0 0 0 0 0 0
57671- 0 0 0 0 0 0 0 0 0 0 0 0
57672- 0 0 0 0 0 0 6 6 6 18 18 18
57673- 54 54 54 82 82 82 2 2 6 26 26 26
57674- 22 22 22 2 2 6 123 123 123 253 253 253
57675-253 253 253 253 253 253 253 253 253 253 253 253
57676-253 253 253 253 253 253 253 253 253 253 253 253
57677-253 253 253 253 253 253 253 253 253 253 253 253
57678-253 253 253 253 253 253 253 253 253 253 253 253
57679-253 253 253 253 253 253 253 253 253 253 253 253
57680-253 253 253 253 253 253 253 253 253 250 250 250
57681-238 238 238 198 198 198 6 6 6 38 38 38
57682- 58 58 58 26 26 26 38 38 38 2 2 6
57683- 2 2 6 2 2 6 2 2 6 46 46 46
57684- 78 78 78 30 30 30 10 10 10 0 0 0
57685- 0 0 0 0 0 0 0 0 0 0 0 0
57686- 0 0 0 0 0 0 0 0 0 0 0 0
57687- 0 0 0 0 0 0 0 0 0 0 0 0
57688- 0 0 0 0 0 0 0 0 0 0 0 0
57689- 0 0 0 0 0 0 0 0 0 0 0 0
57690- 0 0 0 0 0 0 0 0 0 0 0 0
57691- 0 0 0 0 0 0 0 0 0 0 0 0
57692- 0 0 0 0 0 0 10 10 10 30 30 30
57693- 74 74 74 58 58 58 2 2 6 42 42 42
57694- 2 2 6 22 22 22 231 231 231 253 253 253
57695-253 253 253 253 253 253 253 253 253 253 253 253
57696-253 253 253 253 253 253 253 253 253 250 250 250
57697-253 253 253 253 253 253 253 253 253 253 253 253
57698-253 253 253 253 253 253 253 253 253 253 253 253
57699-253 253 253 253 253 253 253 253 253 253 253 253
57700-253 253 253 253 253 253 253 253 253 253 253 253
57701-253 253 253 246 246 246 46 46 46 38 38 38
57702- 42 42 42 14 14 14 38 38 38 14 14 14
57703- 2 2 6 2 2 6 2 2 6 6 6 6
57704- 86 86 86 46 46 46 14 14 14 0 0 0
57705- 0 0 0 0 0 0 0 0 0 0 0 0
57706- 0 0 0 0 0 0 0 0 0 0 0 0
57707- 0 0 0 0 0 0 0 0 0 0 0 0
57708- 0 0 0 0 0 0 0 0 0 0 0 0
57709- 0 0 0 0 0 0 0 0 0 0 0 0
57710- 0 0 0 0 0 0 0 0 0 0 0 0
57711- 0 0 0 0 0 0 0 0 0 0 0 0
57712- 0 0 0 6 6 6 14 14 14 42 42 42
57713- 90 90 90 18 18 18 18 18 18 26 26 26
57714- 2 2 6 116 116 116 253 253 253 253 253 253
57715-253 253 253 253 253 253 253 253 253 253 253 253
57716-253 253 253 253 253 253 250 250 250 238 238 238
57717-253 253 253 253 253 253 253 253 253 253 253 253
57718-253 253 253 253 253 253 253 253 253 253 253 253
57719-253 253 253 253 253 253 253 253 253 253 253 253
57720-253 253 253 253 253 253 253 253 253 253 253 253
57721-253 253 253 253 253 253 94 94 94 6 6 6
57722- 2 2 6 2 2 6 10 10 10 34 34 34
57723- 2 2 6 2 2 6 2 2 6 2 2 6
57724- 74 74 74 58 58 58 22 22 22 6 6 6
57725- 0 0 0 0 0 0 0 0 0 0 0 0
57726- 0 0 0 0 0 0 0 0 0 0 0 0
57727- 0 0 0 0 0 0 0 0 0 0 0 0
57728- 0 0 0 0 0 0 0 0 0 0 0 0
57729- 0 0 0 0 0 0 0 0 0 0 0 0
57730- 0 0 0 0 0 0 0 0 0 0 0 0
57731- 0 0 0 0 0 0 0 0 0 0 0 0
57732- 0 0 0 10 10 10 26 26 26 66 66 66
57733- 82 82 82 2 2 6 38 38 38 6 6 6
57734- 14 14 14 210 210 210 253 253 253 253 253 253
57735-253 253 253 253 253 253 253 253 253 253 253 253
57736-253 253 253 253 253 253 246 246 246 242 242 242
57737-253 253 253 253 253 253 253 253 253 253 253 253
57738-253 253 253 253 253 253 253 253 253 253 253 253
57739-253 253 253 253 253 253 253 253 253 253 253 253
57740-253 253 253 253 253 253 253 253 253 253 253 253
57741-253 253 253 253 253 253 144 144 144 2 2 6
57742- 2 2 6 2 2 6 2 2 6 46 46 46
57743- 2 2 6 2 2 6 2 2 6 2 2 6
57744- 42 42 42 74 74 74 30 30 30 10 10 10
57745- 0 0 0 0 0 0 0 0 0 0 0 0
57746- 0 0 0 0 0 0 0 0 0 0 0 0
57747- 0 0 0 0 0 0 0 0 0 0 0 0
57748- 0 0 0 0 0 0 0 0 0 0 0 0
57749- 0 0 0 0 0 0 0 0 0 0 0 0
57750- 0 0 0 0 0 0 0 0 0 0 0 0
57751- 0 0 0 0 0 0 0 0 0 0 0 0
57752- 6 6 6 14 14 14 42 42 42 90 90 90
57753- 26 26 26 6 6 6 42 42 42 2 2 6
57754- 74 74 74 250 250 250 253 253 253 253 253 253
57755-253 253 253 253 253 253 253 253 253 253 253 253
57756-253 253 253 253 253 253 242 242 242 242 242 242
57757-253 253 253 253 253 253 253 253 253 253 253 253
57758-253 253 253 253 253 253 253 253 253 253 253 253
57759-253 253 253 253 253 253 253 253 253 253 253 253
57760-253 253 253 253 253 253 253 253 253 253 253 253
57761-253 253 253 253 253 253 182 182 182 2 2 6
57762- 2 2 6 2 2 6 2 2 6 46 46 46
57763- 2 2 6 2 2 6 2 2 6 2 2 6
57764- 10 10 10 86 86 86 38 38 38 10 10 10
57765- 0 0 0 0 0 0 0 0 0 0 0 0
57766- 0 0 0 0 0 0 0 0 0 0 0 0
57767- 0 0 0 0 0 0 0 0 0 0 0 0
57768- 0 0 0 0 0 0 0 0 0 0 0 0
57769- 0 0 0 0 0 0 0 0 0 0 0 0
57770- 0 0 0 0 0 0 0 0 0 0 0 0
57771- 0 0 0 0 0 0 0 0 0 0 0 0
57772- 10 10 10 26 26 26 66 66 66 82 82 82
57773- 2 2 6 22 22 22 18 18 18 2 2 6
57774-149 149 149 253 253 253 253 253 253 253 253 253
57775-253 253 253 253 253 253 253 253 253 253 253 253
57776-253 253 253 253 253 253 234 234 234 242 242 242
57777-253 253 253 253 253 253 253 253 253 253 253 253
57778-253 253 253 253 253 253 253 253 253 253 253 253
57779-253 253 253 253 253 253 253 253 253 253 253 253
57780-253 253 253 253 253 253 253 253 253 253 253 253
57781-253 253 253 253 253 253 206 206 206 2 2 6
57782- 2 2 6 2 2 6 2 2 6 38 38 38
57783- 2 2 6 2 2 6 2 2 6 2 2 6
57784- 6 6 6 86 86 86 46 46 46 14 14 14
57785- 0 0 0 0 0 0 0 0 0 0 0 0
57786- 0 0 0 0 0 0 0 0 0 0 0 0
57787- 0 0 0 0 0 0 0 0 0 0 0 0
57788- 0 0 0 0 0 0 0 0 0 0 0 0
57789- 0 0 0 0 0 0 0 0 0 0 0 0
57790- 0 0 0 0 0 0 0 0 0 0 0 0
57791- 0 0 0 0 0 0 0 0 0 6 6 6
57792- 18 18 18 46 46 46 86 86 86 18 18 18
57793- 2 2 6 34 34 34 10 10 10 6 6 6
57794-210 210 210 253 253 253 253 253 253 253 253 253
57795-253 253 253 253 253 253 253 253 253 253 253 253
57796-253 253 253 253 253 253 234 234 234 242 242 242
57797-253 253 253 253 253 253 253 253 253 253 253 253
57798-253 253 253 253 253 253 253 253 253 253 253 253
57799-253 253 253 253 253 253 253 253 253 253 253 253
57800-253 253 253 253 253 253 253 253 253 253 253 253
57801-253 253 253 253 253 253 221 221 221 6 6 6
57802- 2 2 6 2 2 6 6 6 6 30 30 30
57803- 2 2 6 2 2 6 2 2 6 2 2 6
57804- 2 2 6 82 82 82 54 54 54 18 18 18
57805- 6 6 6 0 0 0 0 0 0 0 0 0
57806- 0 0 0 0 0 0 0 0 0 0 0 0
57807- 0 0 0 0 0 0 0 0 0 0 0 0
57808- 0 0 0 0 0 0 0 0 0 0 0 0
57809- 0 0 0 0 0 0 0 0 0 0 0 0
57810- 0 0 0 0 0 0 0 0 0 0 0 0
57811- 0 0 0 0 0 0 0 0 0 10 10 10
57812- 26 26 26 66 66 66 62 62 62 2 2 6
57813- 2 2 6 38 38 38 10 10 10 26 26 26
57814-238 238 238 253 253 253 253 253 253 253 253 253
57815-253 253 253 253 253 253 253 253 253 253 253 253
57816-253 253 253 253 253 253 231 231 231 238 238 238
57817-253 253 253 253 253 253 253 253 253 253 253 253
57818-253 253 253 253 253 253 253 253 253 253 253 253
57819-253 253 253 253 253 253 253 253 253 253 253 253
57820-253 253 253 253 253 253 253 253 253 253 253 253
57821-253 253 253 253 253 253 231 231 231 6 6 6
57822- 2 2 6 2 2 6 10 10 10 30 30 30
57823- 2 2 6 2 2 6 2 2 6 2 2 6
57824- 2 2 6 66 66 66 58 58 58 22 22 22
57825- 6 6 6 0 0 0 0 0 0 0 0 0
57826- 0 0 0 0 0 0 0 0 0 0 0 0
57827- 0 0 0 0 0 0 0 0 0 0 0 0
57828- 0 0 0 0 0 0 0 0 0 0 0 0
57829- 0 0 0 0 0 0 0 0 0 0 0 0
57830- 0 0 0 0 0 0 0 0 0 0 0 0
57831- 0 0 0 0 0 0 0 0 0 10 10 10
57832- 38 38 38 78 78 78 6 6 6 2 2 6
57833- 2 2 6 46 46 46 14 14 14 42 42 42
57834-246 246 246 253 253 253 253 253 253 253 253 253
57835-253 253 253 253 253 253 253 253 253 253 253 253
57836-253 253 253 253 253 253 231 231 231 242 242 242
57837-253 253 253 253 253 253 253 253 253 253 253 253
57838-253 253 253 253 253 253 253 253 253 253 253 253
57839-253 253 253 253 253 253 253 253 253 253 253 253
57840-253 253 253 253 253 253 253 253 253 253 253 253
57841-253 253 253 253 253 253 234 234 234 10 10 10
57842- 2 2 6 2 2 6 22 22 22 14 14 14
57843- 2 2 6 2 2 6 2 2 6 2 2 6
57844- 2 2 6 66 66 66 62 62 62 22 22 22
57845- 6 6 6 0 0 0 0 0 0 0 0 0
57846- 0 0 0 0 0 0 0 0 0 0 0 0
57847- 0 0 0 0 0 0 0 0 0 0 0 0
57848- 0 0 0 0 0 0 0 0 0 0 0 0
57849- 0 0 0 0 0 0 0 0 0 0 0 0
57850- 0 0 0 0 0 0 0 0 0 0 0 0
57851- 0 0 0 0 0 0 6 6 6 18 18 18
57852- 50 50 50 74 74 74 2 2 6 2 2 6
57853- 14 14 14 70 70 70 34 34 34 62 62 62
57854-250 250 250 253 253 253 253 253 253 253 253 253
57855-253 253 253 253 253 253 253 253 253 253 253 253
57856-253 253 253 253 253 253 231 231 231 246 246 246
57857-253 253 253 253 253 253 253 253 253 253 253 253
57858-253 253 253 253 253 253 253 253 253 253 253 253
57859-253 253 253 253 253 253 253 253 253 253 253 253
57860-253 253 253 253 253 253 253 253 253 253 253 253
57861-253 253 253 253 253 253 234 234 234 14 14 14
57862- 2 2 6 2 2 6 30 30 30 2 2 6
57863- 2 2 6 2 2 6 2 2 6 2 2 6
57864- 2 2 6 66 66 66 62 62 62 22 22 22
57865- 6 6 6 0 0 0 0 0 0 0 0 0
57866- 0 0 0 0 0 0 0 0 0 0 0 0
57867- 0 0 0 0 0 0 0 0 0 0 0 0
57868- 0 0 0 0 0 0 0 0 0 0 0 0
57869- 0 0 0 0 0 0 0 0 0 0 0 0
57870- 0 0 0 0 0 0 0 0 0 0 0 0
57871- 0 0 0 0 0 0 6 6 6 18 18 18
57872- 54 54 54 62 62 62 2 2 6 2 2 6
57873- 2 2 6 30 30 30 46 46 46 70 70 70
57874-250 250 250 253 253 253 253 253 253 253 253 253
57875-253 253 253 253 253 253 253 253 253 253 253 253
57876-253 253 253 253 253 253 231 231 231 246 246 246
57877-253 253 253 253 253 253 253 253 253 253 253 253
57878-253 253 253 253 253 253 253 253 253 253 253 253
57879-253 253 253 253 253 253 253 253 253 253 253 253
57880-253 253 253 253 253 253 253 253 253 253 253 253
57881-253 253 253 253 253 253 226 226 226 10 10 10
57882- 2 2 6 6 6 6 30 30 30 2 2 6
57883- 2 2 6 2 2 6 2 2 6 2 2 6
57884- 2 2 6 66 66 66 58 58 58 22 22 22
57885- 6 6 6 0 0 0 0 0 0 0 0 0
57886- 0 0 0 0 0 0 0 0 0 0 0 0
57887- 0 0 0 0 0 0 0 0 0 0 0 0
57888- 0 0 0 0 0 0 0 0 0 0 0 0
57889- 0 0 0 0 0 0 0 0 0 0 0 0
57890- 0 0 0 0 0 0 0 0 0 0 0 0
57891- 0 0 0 0 0 0 6 6 6 22 22 22
57892- 58 58 58 62 62 62 2 2 6 2 2 6
57893- 2 2 6 2 2 6 30 30 30 78 78 78
57894-250 250 250 253 253 253 253 253 253 253 253 253
57895-253 253 253 253 253 253 253 253 253 253 253 253
57896-253 253 253 253 253 253 231 231 231 246 246 246
57897-253 253 253 253 253 253 253 253 253 253 253 253
57898-253 253 253 253 253 253 253 253 253 253 253 253
57899-253 253 253 253 253 253 253 253 253 253 253 253
57900-253 253 253 253 253 253 253 253 253 253 253 253
57901-253 253 253 253 253 253 206 206 206 2 2 6
57902- 22 22 22 34 34 34 18 14 6 22 22 22
57903- 26 26 26 18 18 18 6 6 6 2 2 6
57904- 2 2 6 82 82 82 54 54 54 18 18 18
57905- 6 6 6 0 0 0 0 0 0 0 0 0
57906- 0 0 0 0 0 0 0 0 0 0 0 0
57907- 0 0 0 0 0 0 0 0 0 0 0 0
57908- 0 0 0 0 0 0 0 0 0 0 0 0
57909- 0 0 0 0 0 0 0 0 0 0 0 0
57910- 0 0 0 0 0 0 0 0 0 0 0 0
57911- 0 0 0 0 0 0 6 6 6 26 26 26
57912- 62 62 62 106 106 106 74 54 14 185 133 11
57913-210 162 10 121 92 8 6 6 6 62 62 62
57914-238 238 238 253 253 253 253 253 253 253 253 253
57915-253 253 253 253 253 253 253 253 253 253 253 253
57916-253 253 253 253 253 253 231 231 231 246 246 246
57917-253 253 253 253 253 253 253 253 253 253 253 253
57918-253 253 253 253 253 253 253 253 253 253 253 253
57919-253 253 253 253 253 253 253 253 253 253 253 253
57920-253 253 253 253 253 253 253 253 253 253 253 253
57921-253 253 253 253 253 253 158 158 158 18 18 18
57922- 14 14 14 2 2 6 2 2 6 2 2 6
57923- 6 6 6 18 18 18 66 66 66 38 38 38
57924- 6 6 6 94 94 94 50 50 50 18 18 18
57925- 6 6 6 0 0 0 0 0 0 0 0 0
57926- 0 0 0 0 0 0 0 0 0 0 0 0
57927- 0 0 0 0 0 0 0 0 0 0 0 0
57928- 0 0 0 0 0 0 0 0 0 0 0 0
57929- 0 0 0 0 0 0 0 0 0 0 0 0
57930- 0 0 0 0 0 0 0 0 0 6 6 6
57931- 10 10 10 10 10 10 18 18 18 38 38 38
57932- 78 78 78 142 134 106 216 158 10 242 186 14
57933-246 190 14 246 190 14 156 118 10 10 10 10
57934- 90 90 90 238 238 238 253 253 253 253 253 253
57935-253 253 253 253 253 253 253 253 253 253 253 253
57936-253 253 253 253 253 253 231 231 231 250 250 250
57937-253 253 253 253 253 253 253 253 253 253 253 253
57938-253 253 253 253 253 253 253 253 253 253 253 253
57939-253 253 253 253 253 253 253 253 253 253 253 253
57940-253 253 253 253 253 253 253 253 253 246 230 190
57941-238 204 91 238 204 91 181 142 44 37 26 9
57942- 2 2 6 2 2 6 2 2 6 2 2 6
57943- 2 2 6 2 2 6 38 38 38 46 46 46
57944- 26 26 26 106 106 106 54 54 54 18 18 18
57945- 6 6 6 0 0 0 0 0 0 0 0 0
57946- 0 0 0 0 0 0 0 0 0 0 0 0
57947- 0 0 0 0 0 0 0 0 0 0 0 0
57948- 0 0 0 0 0 0 0 0 0 0 0 0
57949- 0 0 0 0 0 0 0 0 0 0 0 0
57950- 0 0 0 6 6 6 14 14 14 22 22 22
57951- 30 30 30 38 38 38 50 50 50 70 70 70
57952-106 106 106 190 142 34 226 170 11 242 186 14
57953-246 190 14 246 190 14 246 190 14 154 114 10
57954- 6 6 6 74 74 74 226 226 226 253 253 253
57955-253 253 253 253 253 253 253 253 253 253 253 253
57956-253 253 253 253 253 253 231 231 231 250 250 250
57957-253 253 253 253 253 253 253 253 253 253 253 253
57958-253 253 253 253 253 253 253 253 253 253 253 253
57959-253 253 253 253 253 253 253 253 253 253 253 253
57960-253 253 253 253 253 253 253 253 253 228 184 62
57961-241 196 14 241 208 19 232 195 16 38 30 10
57962- 2 2 6 2 2 6 2 2 6 2 2 6
57963- 2 2 6 6 6 6 30 30 30 26 26 26
57964-203 166 17 154 142 90 66 66 66 26 26 26
57965- 6 6 6 0 0 0 0 0 0 0 0 0
57966- 0 0 0 0 0 0 0 0 0 0 0 0
57967- 0 0 0 0 0 0 0 0 0 0 0 0
57968- 0 0 0 0 0 0 0 0 0 0 0 0
57969- 0 0 0 0 0 0 0 0 0 0 0 0
57970- 6 6 6 18 18 18 38 38 38 58 58 58
57971- 78 78 78 86 86 86 101 101 101 123 123 123
57972-175 146 61 210 150 10 234 174 13 246 186 14
57973-246 190 14 246 190 14 246 190 14 238 190 10
57974-102 78 10 2 2 6 46 46 46 198 198 198
57975-253 253 253 253 253 253 253 253 253 253 253 253
57976-253 253 253 253 253 253 234 234 234 242 242 242
57977-253 253 253 253 253 253 253 253 253 253 253 253
57978-253 253 253 253 253 253 253 253 253 253 253 253
57979-253 253 253 253 253 253 253 253 253 253 253 253
57980-253 253 253 253 253 253 253 253 253 224 178 62
57981-242 186 14 241 196 14 210 166 10 22 18 6
57982- 2 2 6 2 2 6 2 2 6 2 2 6
57983- 2 2 6 2 2 6 6 6 6 121 92 8
57984-238 202 15 232 195 16 82 82 82 34 34 34
57985- 10 10 10 0 0 0 0 0 0 0 0 0
57986- 0 0 0 0 0 0 0 0 0 0 0 0
57987- 0 0 0 0 0 0 0 0 0 0 0 0
57988- 0 0 0 0 0 0 0 0 0 0 0 0
57989- 0 0 0 0 0 0 0 0 0 0 0 0
57990- 14 14 14 38 38 38 70 70 70 154 122 46
57991-190 142 34 200 144 11 197 138 11 197 138 11
57992-213 154 11 226 170 11 242 186 14 246 190 14
57993-246 190 14 246 190 14 246 190 14 246 190 14
57994-225 175 15 46 32 6 2 2 6 22 22 22
57995-158 158 158 250 250 250 253 253 253 253 253 253
57996-253 253 253 253 253 253 253 253 253 253 253 253
57997-253 253 253 253 253 253 253 253 253 253 253 253
57998-253 253 253 253 253 253 253 253 253 253 253 253
57999-253 253 253 253 253 253 253 253 253 253 253 253
58000-253 253 253 250 250 250 242 242 242 224 178 62
58001-239 182 13 236 186 11 213 154 11 46 32 6
58002- 2 2 6 2 2 6 2 2 6 2 2 6
58003- 2 2 6 2 2 6 61 42 6 225 175 15
58004-238 190 10 236 186 11 112 100 78 42 42 42
58005- 14 14 14 0 0 0 0 0 0 0 0 0
58006- 0 0 0 0 0 0 0 0 0 0 0 0
58007- 0 0 0 0 0 0 0 0 0 0 0 0
58008- 0 0 0 0 0 0 0 0 0 0 0 0
58009- 0 0 0 0 0 0 0 0 0 6 6 6
58010- 22 22 22 54 54 54 154 122 46 213 154 11
58011-226 170 11 230 174 11 226 170 11 226 170 11
58012-236 178 12 242 186 14 246 190 14 246 190 14
58013-246 190 14 246 190 14 246 190 14 246 190 14
58014-241 196 14 184 144 12 10 10 10 2 2 6
58015- 6 6 6 116 116 116 242 242 242 253 253 253
58016-253 253 253 253 253 253 253 253 253 253 253 253
58017-253 253 253 253 253 253 253 253 253 253 253 253
58018-253 253 253 253 253 253 253 253 253 253 253 253
58019-253 253 253 253 253 253 253 253 253 253 253 253
58020-253 253 253 231 231 231 198 198 198 214 170 54
58021-236 178 12 236 178 12 210 150 10 137 92 6
58022- 18 14 6 2 2 6 2 2 6 2 2 6
58023- 6 6 6 70 47 6 200 144 11 236 178 12
58024-239 182 13 239 182 13 124 112 88 58 58 58
58025- 22 22 22 6 6 6 0 0 0 0 0 0
58026- 0 0 0 0 0 0 0 0 0 0 0 0
58027- 0 0 0 0 0 0 0 0 0 0 0 0
58028- 0 0 0 0 0 0 0 0 0 0 0 0
58029- 0 0 0 0 0 0 0 0 0 10 10 10
58030- 30 30 30 70 70 70 180 133 36 226 170 11
58031-239 182 13 242 186 14 242 186 14 246 186 14
58032-246 190 14 246 190 14 246 190 14 246 190 14
58033-246 190 14 246 190 14 246 190 14 246 190 14
58034-246 190 14 232 195 16 98 70 6 2 2 6
58035- 2 2 6 2 2 6 66 66 66 221 221 221
58036-253 253 253 253 253 253 253 253 253 253 253 253
58037-253 253 253 253 253 253 253 253 253 253 253 253
58038-253 253 253 253 253 253 253 253 253 253 253 253
58039-253 253 253 253 253 253 253 253 253 253 253 253
58040-253 253 253 206 206 206 198 198 198 214 166 58
58041-230 174 11 230 174 11 216 158 10 192 133 9
58042-163 110 8 116 81 8 102 78 10 116 81 8
58043-167 114 7 197 138 11 226 170 11 239 182 13
58044-242 186 14 242 186 14 162 146 94 78 78 78
58045- 34 34 34 14 14 14 6 6 6 0 0 0
58046- 0 0 0 0 0 0 0 0 0 0 0 0
58047- 0 0 0 0 0 0 0 0 0 0 0 0
58048- 0 0 0 0 0 0 0 0 0 0 0 0
58049- 0 0 0 0 0 0 0 0 0 6 6 6
58050- 30 30 30 78 78 78 190 142 34 226 170 11
58051-239 182 13 246 190 14 246 190 14 246 190 14
58052-246 190 14 246 190 14 246 190 14 246 190 14
58053-246 190 14 246 190 14 246 190 14 246 190 14
58054-246 190 14 241 196 14 203 166 17 22 18 6
58055- 2 2 6 2 2 6 2 2 6 38 38 38
58056-218 218 218 253 253 253 253 253 253 253 253 253
58057-253 253 253 253 253 253 253 253 253 253 253 253
58058-253 253 253 253 253 253 253 253 253 253 253 253
58059-253 253 253 253 253 253 253 253 253 253 253 253
58060-250 250 250 206 206 206 198 198 198 202 162 69
58061-226 170 11 236 178 12 224 166 10 210 150 10
58062-200 144 11 197 138 11 192 133 9 197 138 11
58063-210 150 10 226 170 11 242 186 14 246 190 14
58064-246 190 14 246 186 14 225 175 15 124 112 88
58065- 62 62 62 30 30 30 14 14 14 6 6 6
58066- 0 0 0 0 0 0 0 0 0 0 0 0
58067- 0 0 0 0 0 0 0 0 0 0 0 0
58068- 0 0 0 0 0 0 0 0 0 0 0 0
58069- 0 0 0 0 0 0 0 0 0 10 10 10
58070- 30 30 30 78 78 78 174 135 50 224 166 10
58071-239 182 13 246 190 14 246 190 14 246 190 14
58072-246 190 14 246 190 14 246 190 14 246 190 14
58073-246 190 14 246 190 14 246 190 14 246 190 14
58074-246 190 14 246 190 14 241 196 14 139 102 15
58075- 2 2 6 2 2 6 2 2 6 2 2 6
58076- 78 78 78 250 250 250 253 253 253 253 253 253
58077-253 253 253 253 253 253 253 253 253 253 253 253
58078-253 253 253 253 253 253 253 253 253 253 253 253
58079-253 253 253 253 253 253 253 253 253 253 253 253
58080-250 250 250 214 214 214 198 198 198 190 150 46
58081-219 162 10 236 178 12 234 174 13 224 166 10
58082-216 158 10 213 154 11 213 154 11 216 158 10
58083-226 170 11 239 182 13 246 190 14 246 190 14
58084-246 190 14 246 190 14 242 186 14 206 162 42
58085-101 101 101 58 58 58 30 30 30 14 14 14
58086- 6 6 6 0 0 0 0 0 0 0 0 0
58087- 0 0 0 0 0 0 0 0 0 0 0 0
58088- 0 0 0 0 0 0 0 0 0 0 0 0
58089- 0 0 0 0 0 0 0 0 0 10 10 10
58090- 30 30 30 74 74 74 174 135 50 216 158 10
58091-236 178 12 246 190 14 246 190 14 246 190 14
58092-246 190 14 246 190 14 246 190 14 246 190 14
58093-246 190 14 246 190 14 246 190 14 246 190 14
58094-246 190 14 246 190 14 241 196 14 226 184 13
58095- 61 42 6 2 2 6 2 2 6 2 2 6
58096- 22 22 22 238 238 238 253 253 253 253 253 253
58097-253 253 253 253 253 253 253 253 253 253 253 253
58098-253 253 253 253 253 253 253 253 253 253 253 253
58099-253 253 253 253 253 253 253 253 253 253 253 253
58100-253 253 253 226 226 226 187 187 187 180 133 36
58101-216 158 10 236 178 12 239 182 13 236 178 12
58102-230 174 11 226 170 11 226 170 11 230 174 11
58103-236 178 12 242 186 14 246 190 14 246 190 14
58104-246 190 14 246 190 14 246 186 14 239 182 13
58105-206 162 42 106 106 106 66 66 66 34 34 34
58106- 14 14 14 6 6 6 0 0 0 0 0 0
58107- 0 0 0 0 0 0 0 0 0 0 0 0
58108- 0 0 0 0 0 0 0 0 0 0 0 0
58109- 0 0 0 0 0 0 0 0 0 6 6 6
58110- 26 26 26 70 70 70 163 133 67 213 154 11
58111-236 178 12 246 190 14 246 190 14 246 190 14
58112-246 190 14 246 190 14 246 190 14 246 190 14
58113-246 190 14 246 190 14 246 190 14 246 190 14
58114-246 190 14 246 190 14 246 190 14 241 196 14
58115-190 146 13 18 14 6 2 2 6 2 2 6
58116- 46 46 46 246 246 246 253 253 253 253 253 253
58117-253 253 253 253 253 253 253 253 253 253 253 253
58118-253 253 253 253 253 253 253 253 253 253 253 253
58119-253 253 253 253 253 253 253 253 253 253 253 253
58120-253 253 253 221 221 221 86 86 86 156 107 11
58121-216 158 10 236 178 12 242 186 14 246 186 14
58122-242 186 14 239 182 13 239 182 13 242 186 14
58123-242 186 14 246 186 14 246 190 14 246 190 14
58124-246 190 14 246 190 14 246 190 14 246 190 14
58125-242 186 14 225 175 15 142 122 72 66 66 66
58126- 30 30 30 10 10 10 0 0 0 0 0 0
58127- 0 0 0 0 0 0 0 0 0 0 0 0
58128- 0 0 0 0 0 0 0 0 0 0 0 0
58129- 0 0 0 0 0 0 0 0 0 6 6 6
58130- 26 26 26 70 70 70 163 133 67 210 150 10
58131-236 178 12 246 190 14 246 190 14 246 190 14
58132-246 190 14 246 190 14 246 190 14 246 190 14
58133-246 190 14 246 190 14 246 190 14 246 190 14
58134-246 190 14 246 190 14 246 190 14 246 190 14
58135-232 195 16 121 92 8 34 34 34 106 106 106
58136-221 221 221 253 253 253 253 253 253 253 253 253
58137-253 253 253 253 253 253 253 253 253 253 253 253
58138-253 253 253 253 253 253 253 253 253 253 253 253
58139-253 253 253 253 253 253 253 253 253 253 253 253
58140-242 242 242 82 82 82 18 14 6 163 110 8
58141-216 158 10 236 178 12 242 186 14 246 190 14
58142-246 190 14 246 190 14 246 190 14 246 190 14
58143-246 190 14 246 190 14 246 190 14 246 190 14
58144-246 190 14 246 190 14 246 190 14 246 190 14
58145-246 190 14 246 190 14 242 186 14 163 133 67
58146- 46 46 46 18 18 18 6 6 6 0 0 0
58147- 0 0 0 0 0 0 0 0 0 0 0 0
58148- 0 0 0 0 0 0 0 0 0 0 0 0
58149- 0 0 0 0 0 0 0 0 0 10 10 10
58150- 30 30 30 78 78 78 163 133 67 210 150 10
58151-236 178 12 246 186 14 246 190 14 246 190 14
58152-246 190 14 246 190 14 246 190 14 246 190 14
58153-246 190 14 246 190 14 246 190 14 246 190 14
58154-246 190 14 246 190 14 246 190 14 246 190 14
58155-241 196 14 215 174 15 190 178 144 253 253 253
58156-253 253 253 253 253 253 253 253 253 253 253 253
58157-253 253 253 253 253 253 253 253 253 253 253 253
58158-253 253 253 253 253 253 253 253 253 253 253 253
58159-253 253 253 253 253 253 253 253 253 218 218 218
58160- 58 58 58 2 2 6 22 18 6 167 114 7
58161-216 158 10 236 178 12 246 186 14 246 190 14
58162-246 190 14 246 190 14 246 190 14 246 190 14
58163-246 190 14 246 190 14 246 190 14 246 190 14
58164-246 190 14 246 190 14 246 190 14 246 190 14
58165-246 190 14 246 186 14 242 186 14 190 150 46
58166- 54 54 54 22 22 22 6 6 6 0 0 0
58167- 0 0 0 0 0 0 0 0 0 0 0 0
58168- 0 0 0 0 0 0 0 0 0 0 0 0
58169- 0 0 0 0 0 0 0 0 0 14 14 14
58170- 38 38 38 86 86 86 180 133 36 213 154 11
58171-236 178 12 246 186 14 246 190 14 246 190 14
58172-246 190 14 246 190 14 246 190 14 246 190 14
58173-246 190 14 246 190 14 246 190 14 246 190 14
58174-246 190 14 246 190 14 246 190 14 246 190 14
58175-246 190 14 232 195 16 190 146 13 214 214 214
58176-253 253 253 253 253 253 253 253 253 253 253 253
58177-253 253 253 253 253 253 253 253 253 253 253 253
58178-253 253 253 253 253 253 253 253 253 253 253 253
58179-253 253 253 250 250 250 170 170 170 26 26 26
58180- 2 2 6 2 2 6 37 26 9 163 110 8
58181-219 162 10 239 182 13 246 186 14 246 190 14
58182-246 190 14 246 190 14 246 190 14 246 190 14
58183-246 190 14 246 190 14 246 190 14 246 190 14
58184-246 190 14 246 190 14 246 190 14 246 190 14
58185-246 186 14 236 178 12 224 166 10 142 122 72
58186- 46 46 46 18 18 18 6 6 6 0 0 0
58187- 0 0 0 0 0 0 0 0 0 0 0 0
58188- 0 0 0 0 0 0 0 0 0 0 0 0
58189- 0 0 0 0 0 0 6 6 6 18 18 18
58190- 50 50 50 109 106 95 192 133 9 224 166 10
58191-242 186 14 246 190 14 246 190 14 246 190 14
58192-246 190 14 246 190 14 246 190 14 246 190 14
58193-246 190 14 246 190 14 246 190 14 246 190 14
58194-246 190 14 246 190 14 246 190 14 246 190 14
58195-242 186 14 226 184 13 210 162 10 142 110 46
58196-226 226 226 253 253 253 253 253 253 253 253 253
58197-253 253 253 253 253 253 253 253 253 253 253 253
58198-253 253 253 253 253 253 253 253 253 253 253 253
58199-198 198 198 66 66 66 2 2 6 2 2 6
58200- 2 2 6 2 2 6 50 34 6 156 107 11
58201-219 162 10 239 182 13 246 186 14 246 190 14
58202-246 190 14 246 190 14 246 190 14 246 190 14
58203-246 190 14 246 190 14 246 190 14 246 190 14
58204-246 190 14 246 190 14 246 190 14 242 186 14
58205-234 174 13 213 154 11 154 122 46 66 66 66
58206- 30 30 30 10 10 10 0 0 0 0 0 0
58207- 0 0 0 0 0 0 0 0 0 0 0 0
58208- 0 0 0 0 0 0 0 0 0 0 0 0
58209- 0 0 0 0 0 0 6 6 6 22 22 22
58210- 58 58 58 154 121 60 206 145 10 234 174 13
58211-242 186 14 246 186 14 246 190 14 246 190 14
58212-246 190 14 246 190 14 246 190 14 246 190 14
58213-246 190 14 246 190 14 246 190 14 246 190 14
58214-246 190 14 246 190 14 246 190 14 246 190 14
58215-246 186 14 236 178 12 210 162 10 163 110 8
58216- 61 42 6 138 138 138 218 218 218 250 250 250
58217-253 253 253 253 253 253 253 253 253 250 250 250
58218-242 242 242 210 210 210 144 144 144 66 66 66
58219- 6 6 6 2 2 6 2 2 6 2 2 6
58220- 2 2 6 2 2 6 61 42 6 163 110 8
58221-216 158 10 236 178 12 246 190 14 246 190 14
58222-246 190 14 246 190 14 246 190 14 246 190 14
58223-246 190 14 246 190 14 246 190 14 246 190 14
58224-246 190 14 239 182 13 230 174 11 216 158 10
58225-190 142 34 124 112 88 70 70 70 38 38 38
58226- 18 18 18 6 6 6 0 0 0 0 0 0
58227- 0 0 0 0 0 0 0 0 0 0 0 0
58228- 0 0 0 0 0 0 0 0 0 0 0 0
58229- 0 0 0 0 0 0 6 6 6 22 22 22
58230- 62 62 62 168 124 44 206 145 10 224 166 10
58231-236 178 12 239 182 13 242 186 14 242 186 14
58232-246 186 14 246 190 14 246 190 14 246 190 14
58233-246 190 14 246 190 14 246 190 14 246 190 14
58234-246 190 14 246 190 14 246 190 14 246 190 14
58235-246 190 14 236 178 12 216 158 10 175 118 6
58236- 80 54 7 2 2 6 6 6 6 30 30 30
58237- 54 54 54 62 62 62 50 50 50 38 38 38
58238- 14 14 14 2 2 6 2 2 6 2 2 6
58239- 2 2 6 2 2 6 2 2 6 2 2 6
58240- 2 2 6 6 6 6 80 54 7 167 114 7
58241-213 154 11 236 178 12 246 190 14 246 190 14
58242-246 190 14 246 190 14 246 190 14 246 190 14
58243-246 190 14 242 186 14 239 182 13 239 182 13
58244-230 174 11 210 150 10 174 135 50 124 112 88
58245- 82 82 82 54 54 54 34 34 34 18 18 18
58246- 6 6 6 0 0 0 0 0 0 0 0 0
58247- 0 0 0 0 0 0 0 0 0 0 0 0
58248- 0 0 0 0 0 0 0 0 0 0 0 0
58249- 0 0 0 0 0 0 6 6 6 18 18 18
58250- 50 50 50 158 118 36 192 133 9 200 144 11
58251-216 158 10 219 162 10 224 166 10 226 170 11
58252-230 174 11 236 178 12 239 182 13 239 182 13
58253-242 186 14 246 186 14 246 190 14 246 190 14
58254-246 190 14 246 190 14 246 190 14 246 190 14
58255-246 186 14 230 174 11 210 150 10 163 110 8
58256-104 69 6 10 10 10 2 2 6 2 2 6
58257- 2 2 6 2 2 6 2 2 6 2 2 6
58258- 2 2 6 2 2 6 2 2 6 2 2 6
58259- 2 2 6 2 2 6 2 2 6 2 2 6
58260- 2 2 6 6 6 6 91 60 6 167 114 7
58261-206 145 10 230 174 11 242 186 14 246 190 14
58262-246 190 14 246 190 14 246 186 14 242 186 14
58263-239 182 13 230 174 11 224 166 10 213 154 11
58264-180 133 36 124 112 88 86 86 86 58 58 58
58265- 38 38 38 22 22 22 10 10 10 6 6 6
58266- 0 0 0 0 0 0 0 0 0 0 0 0
58267- 0 0 0 0 0 0 0 0 0 0 0 0
58268- 0 0 0 0 0 0 0 0 0 0 0 0
58269- 0 0 0 0 0 0 0 0 0 14 14 14
58270- 34 34 34 70 70 70 138 110 50 158 118 36
58271-167 114 7 180 123 7 192 133 9 197 138 11
58272-200 144 11 206 145 10 213 154 11 219 162 10
58273-224 166 10 230 174 11 239 182 13 242 186 14
58274-246 186 14 246 186 14 246 186 14 246 186 14
58275-239 182 13 216 158 10 185 133 11 152 99 6
58276-104 69 6 18 14 6 2 2 6 2 2 6
58277- 2 2 6 2 2 6 2 2 6 2 2 6
58278- 2 2 6 2 2 6 2 2 6 2 2 6
58279- 2 2 6 2 2 6 2 2 6 2 2 6
58280- 2 2 6 6 6 6 80 54 7 152 99 6
58281-192 133 9 219 162 10 236 178 12 239 182 13
58282-246 186 14 242 186 14 239 182 13 236 178 12
58283-224 166 10 206 145 10 192 133 9 154 121 60
58284- 94 94 94 62 62 62 42 42 42 22 22 22
58285- 14 14 14 6 6 6 0 0 0 0 0 0
58286- 0 0 0 0 0 0 0 0 0 0 0 0
58287- 0 0 0 0 0 0 0 0 0 0 0 0
58288- 0 0 0 0 0 0 0 0 0 0 0 0
58289- 0 0 0 0 0 0 0 0 0 6 6 6
58290- 18 18 18 34 34 34 58 58 58 78 78 78
58291-101 98 89 124 112 88 142 110 46 156 107 11
58292-163 110 8 167 114 7 175 118 6 180 123 7
58293-185 133 11 197 138 11 210 150 10 219 162 10
58294-226 170 11 236 178 12 236 178 12 234 174 13
58295-219 162 10 197 138 11 163 110 8 130 83 6
58296- 91 60 6 10 10 10 2 2 6 2 2 6
58297- 18 18 18 38 38 38 38 38 38 38 38 38
58298- 38 38 38 38 38 38 38 38 38 38 38 38
58299- 38 38 38 38 38 38 26 26 26 2 2 6
58300- 2 2 6 6 6 6 70 47 6 137 92 6
58301-175 118 6 200 144 11 219 162 10 230 174 11
58302-234 174 13 230 174 11 219 162 10 210 150 10
58303-192 133 9 163 110 8 124 112 88 82 82 82
58304- 50 50 50 30 30 30 14 14 14 6 6 6
58305- 0 0 0 0 0 0 0 0 0 0 0 0
58306- 0 0 0 0 0 0 0 0 0 0 0 0
58307- 0 0 0 0 0 0 0 0 0 0 0 0
58308- 0 0 0 0 0 0 0 0 0 0 0 0
58309- 0 0 0 0 0 0 0 0 0 0 0 0
58310- 6 6 6 14 14 14 22 22 22 34 34 34
58311- 42 42 42 58 58 58 74 74 74 86 86 86
58312-101 98 89 122 102 70 130 98 46 121 87 25
58313-137 92 6 152 99 6 163 110 8 180 123 7
58314-185 133 11 197 138 11 206 145 10 200 144 11
58315-180 123 7 156 107 11 130 83 6 104 69 6
58316- 50 34 6 54 54 54 110 110 110 101 98 89
58317- 86 86 86 82 82 82 78 78 78 78 78 78
58318- 78 78 78 78 78 78 78 78 78 78 78 78
58319- 78 78 78 82 82 82 86 86 86 94 94 94
58320-106 106 106 101 101 101 86 66 34 124 80 6
58321-156 107 11 180 123 7 192 133 9 200 144 11
58322-206 145 10 200 144 11 192 133 9 175 118 6
58323-139 102 15 109 106 95 70 70 70 42 42 42
58324- 22 22 22 10 10 10 0 0 0 0 0 0
58325- 0 0 0 0 0 0 0 0 0 0 0 0
58326- 0 0 0 0 0 0 0 0 0 0 0 0
58327- 0 0 0 0 0 0 0 0 0 0 0 0
58328- 0 0 0 0 0 0 0 0 0 0 0 0
58329- 0 0 0 0 0 0 0 0 0 0 0 0
58330- 0 0 0 0 0 0 6 6 6 10 10 10
58331- 14 14 14 22 22 22 30 30 30 38 38 38
58332- 50 50 50 62 62 62 74 74 74 90 90 90
58333-101 98 89 112 100 78 121 87 25 124 80 6
58334-137 92 6 152 99 6 152 99 6 152 99 6
58335-138 86 6 124 80 6 98 70 6 86 66 30
58336-101 98 89 82 82 82 58 58 58 46 46 46
58337- 38 38 38 34 34 34 34 34 34 34 34 34
58338- 34 34 34 34 34 34 34 34 34 34 34 34
58339- 34 34 34 34 34 34 38 38 38 42 42 42
58340- 54 54 54 82 82 82 94 86 76 91 60 6
58341-134 86 6 156 107 11 167 114 7 175 118 6
58342-175 118 6 167 114 7 152 99 6 121 87 25
58343-101 98 89 62 62 62 34 34 34 18 18 18
58344- 6 6 6 0 0 0 0 0 0 0 0 0
58345- 0 0 0 0 0 0 0 0 0 0 0 0
58346- 0 0 0 0 0 0 0 0 0 0 0 0
58347- 0 0 0 0 0 0 0 0 0 0 0 0
58348- 0 0 0 0 0 0 0 0 0 0 0 0
58349- 0 0 0 0 0 0 0 0 0 0 0 0
58350- 0 0 0 0 0 0 0 0 0 0 0 0
58351- 0 0 0 6 6 6 6 6 6 10 10 10
58352- 18 18 18 22 22 22 30 30 30 42 42 42
58353- 50 50 50 66 66 66 86 86 86 101 98 89
58354-106 86 58 98 70 6 104 69 6 104 69 6
58355-104 69 6 91 60 6 82 62 34 90 90 90
58356- 62 62 62 38 38 38 22 22 22 14 14 14
58357- 10 10 10 10 10 10 10 10 10 10 10 10
58358- 10 10 10 10 10 10 6 6 6 10 10 10
58359- 10 10 10 10 10 10 10 10 10 14 14 14
58360- 22 22 22 42 42 42 70 70 70 89 81 66
58361- 80 54 7 104 69 6 124 80 6 137 92 6
58362-134 86 6 116 81 8 100 82 52 86 86 86
58363- 58 58 58 30 30 30 14 14 14 6 6 6
58364- 0 0 0 0 0 0 0 0 0 0 0 0
58365- 0 0 0 0 0 0 0 0 0 0 0 0
58366- 0 0 0 0 0 0 0 0 0 0 0 0
58367- 0 0 0 0 0 0 0 0 0 0 0 0
58368- 0 0 0 0 0 0 0 0 0 0 0 0
58369- 0 0 0 0 0 0 0 0 0 0 0 0
58370- 0 0 0 0 0 0 0 0 0 0 0 0
58371- 0 0 0 0 0 0 0 0 0 0 0 0
58372- 0 0 0 6 6 6 10 10 10 14 14 14
58373- 18 18 18 26 26 26 38 38 38 54 54 54
58374- 70 70 70 86 86 86 94 86 76 89 81 66
58375- 89 81 66 86 86 86 74 74 74 50 50 50
58376- 30 30 30 14 14 14 6 6 6 0 0 0
58377- 0 0 0 0 0 0 0 0 0 0 0 0
58378- 0 0 0 0 0 0 0 0 0 0 0 0
58379- 0 0 0 0 0 0 0 0 0 0 0 0
58380- 6 6 6 18 18 18 34 34 34 58 58 58
58381- 82 82 82 89 81 66 89 81 66 89 81 66
58382- 94 86 66 94 86 76 74 74 74 50 50 50
58383- 26 26 26 14 14 14 6 6 6 0 0 0
58384- 0 0 0 0 0 0 0 0 0 0 0 0
58385- 0 0 0 0 0 0 0 0 0 0 0 0
58386- 0 0 0 0 0 0 0 0 0 0 0 0
58387- 0 0 0 0 0 0 0 0 0 0 0 0
58388- 0 0 0 0 0 0 0 0 0 0 0 0
58389- 0 0 0 0 0 0 0 0 0 0 0 0
58390- 0 0 0 0 0 0 0 0 0 0 0 0
58391- 0 0 0 0 0 0 0 0 0 0 0 0
58392- 0 0 0 0 0 0 0 0 0 0 0 0
58393- 6 6 6 6 6 6 14 14 14 18 18 18
58394- 30 30 30 38 38 38 46 46 46 54 54 54
58395- 50 50 50 42 42 42 30 30 30 18 18 18
58396- 10 10 10 0 0 0 0 0 0 0 0 0
58397- 0 0 0 0 0 0 0 0 0 0 0 0
58398- 0 0 0 0 0 0 0 0 0 0 0 0
58399- 0 0 0 0 0 0 0 0 0 0 0 0
58400- 0 0 0 6 6 6 14 14 14 26 26 26
58401- 38 38 38 50 50 50 58 58 58 58 58 58
58402- 54 54 54 42 42 42 30 30 30 18 18 18
58403- 10 10 10 0 0 0 0 0 0 0 0 0
58404- 0 0 0 0 0 0 0 0 0 0 0 0
58405- 0 0 0 0 0 0 0 0 0 0 0 0
58406- 0 0 0 0 0 0 0 0 0 0 0 0
58407- 0 0 0 0 0 0 0 0 0 0 0 0
58408- 0 0 0 0 0 0 0 0 0 0 0 0
58409- 0 0 0 0 0 0 0 0 0 0 0 0
58410- 0 0 0 0 0 0 0 0 0 0 0 0
58411- 0 0 0 0 0 0 0 0 0 0 0 0
58412- 0 0 0 0 0 0 0 0 0 0 0 0
58413- 0 0 0 0 0 0 0 0 0 6 6 6
58414- 6 6 6 10 10 10 14 14 14 18 18 18
58415- 18 18 18 14 14 14 10 10 10 6 6 6
58416- 0 0 0 0 0 0 0 0 0 0 0 0
58417- 0 0 0 0 0 0 0 0 0 0 0 0
58418- 0 0 0 0 0 0 0 0 0 0 0 0
58419- 0 0 0 0 0 0 0 0 0 0 0 0
58420- 0 0 0 0 0 0 0 0 0 6 6 6
58421- 14 14 14 18 18 18 22 22 22 22 22 22
58422- 18 18 18 14 14 14 10 10 10 6 6 6
58423- 0 0 0 0 0 0 0 0 0 0 0 0
58424- 0 0 0 0 0 0 0 0 0 0 0 0
58425- 0 0 0 0 0 0 0 0 0 0 0 0
58426- 0 0 0 0 0 0 0 0 0 0 0 0
58427- 0 0 0 0 0 0 0 0 0 0 0 0
58428+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58431+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58434+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58437+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58438+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58439+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58440+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58441+4 4 4 4 4 4
58442+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58445+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58448+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58449+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58451+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58452+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58453+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58454+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58455+4 4 4 4 4 4
58456+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58458+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58459+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58462+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58463+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58464+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58465+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58466+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58467+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58468+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58469+4 4 4 4 4 4
58470+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58472+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58473+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58475+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58476+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58477+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58478+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58479+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58480+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58481+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58482+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58483+4 4 4 4 4 4
58484+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58486+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58487+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58489+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58490+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58491+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58492+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58493+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58494+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58495+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58496+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58497+4 4 4 4 4 4
58498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58500+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58501+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58504+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58505+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58506+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58507+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58508+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58509+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58510+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58511+4 4 4 4 4 4
58512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58514+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58515+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58516+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
58517+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
58518+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58519+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58520+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58521+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
58522+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58523+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
58524+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58525+4 4 4 4 4 4
58526+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58527+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58528+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58529+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58530+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
58531+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
58532+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58533+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58534+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58535+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
58536+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
58537+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
58538+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58539+4 4 4 4 4 4
58540+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58541+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58542+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58543+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58544+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
58545+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
58546+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58547+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58548+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58549+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
58550+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
58551+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
58552+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
58553+4 4 4 4 4 4
58554+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58555+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58556+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58557+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
58558+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
58559+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
58560+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
58561+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58562+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
58563+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
58564+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
58565+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
58566+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
58567+4 4 4 4 4 4
58568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58569+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58570+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58571+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
58572+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
58573+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
58574+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
58575+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58576+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
58577+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
58578+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
58579+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
58580+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
58581+4 4 4 4 4 4
58582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58584+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58585+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
58586+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
58587+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
58588+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
58589+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
58590+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
58591+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
58592+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
58593+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
58594+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
58595+4 4 4 4 4 4
58596+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58597+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58598+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
58599+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
58600+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
58601+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
58602+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
58603+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
58604+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
58605+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
58606+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
58607+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
58608+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
58609+4 4 4 4 4 4
58610+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58611+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58612+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
58613+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
58614+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
58615+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
58616+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
58617+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
58618+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
58619+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
58620+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
58621+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
58622+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
58623+4 4 4 4 4 4
58624+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58625+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58626+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
58627+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
58628+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
58629+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
58630+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
58631+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
58632+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
58633+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
58634+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
58635+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
58636+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
58637+4 4 4 4 4 4
58638+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58639+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58640+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
58641+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
58642+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
58643+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
58644+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
58645+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
58646+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
58647+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
58648+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
58649+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
58650+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
58651+4 4 4 4 4 4
58652+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58653+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
58654+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
58655+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
58656+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
58657+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
58658+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
58659+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
58660+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
58661+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
58662+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
58663+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
58664+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
58665+4 4 4 4 4 4
58666+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58667+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
58668+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
58669+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
58670+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
58671+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
58672+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
58673+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
58674+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
58675+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
58676+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
58677+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
58678+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
58679+0 0 0 4 4 4
58680+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
58681+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
58682+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
58683+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
58684+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
58685+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
58686+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
58687+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
58688+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
58689+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
58690+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
58691+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
58692+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
58693+2 0 0 0 0 0
58694+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
58695+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
58696+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
58697+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
58698+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
58699+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
58700+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
58701+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
58702+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
58703+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
58704+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
58705+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
58706+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
58707+37 38 37 0 0 0
58708+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
58709+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
58710+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
58711+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
58712+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
58713+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
58714+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
58715+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
58716+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
58717+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
58718+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
58719+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
58720+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
58721+85 115 134 4 0 0
58722+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
58723+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
58724+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
58725+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
58726+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
58727+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
58728+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
58729+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
58730+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
58731+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
58732+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
58733+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
58734+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
58735+60 73 81 4 0 0
58736+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
58737+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
58738+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
58739+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
58740+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
58741+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
58742+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
58743+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
58744+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
58745+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
58746+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
58747+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
58748+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
58749+16 19 21 4 0 0
58750+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
58751+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
58752+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
58753+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
58754+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
58755+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
58756+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
58757+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
58758+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
58759+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
58760+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
58761+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
58762+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
58763+4 0 0 4 3 3
58764+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
58765+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
58766+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
58767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
58768+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
58769+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
58770+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
58771+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
58772+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
58773+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
58774+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
58775+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
58776+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
58777+3 2 2 4 4 4
58778+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
58779+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
58780+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
58781+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
58782+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
58783+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
58784+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
58785+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
58786+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
58787+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
58788+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
58789+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
58790+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
58791+4 4 4 4 4 4
58792+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
58793+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
58794+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
58795+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
58796+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
58797+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
58798+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
58799+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
58800+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
58801+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
58802+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
58803+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
58804+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
58805+4 4 4 4 4 4
58806+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
58807+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
58808+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
58809+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
58810+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
58811+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
58812+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
58813+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
58814+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
58815+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
58816+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
58817+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
58818+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
58819+5 5 5 5 5 5
58820+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
58821+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
58822+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
58823+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
58824+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
58825+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58826+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
58827+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
58828+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
58829+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
58830+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
58831+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
58832+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
58833+5 5 5 4 4 4
58834+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
58835+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
58836+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
58837+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
58838+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58839+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
58840+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
58841+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
58842+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
58843+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
58844+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
58845+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
58846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58847+4 4 4 4 4 4
58848+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
58849+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
58850+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
58851+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
58852+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
58853+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58854+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58855+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
58856+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
58857+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
58858+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
58859+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
58860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58861+4 4 4 4 4 4
58862+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
58863+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
58864+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
58865+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
58866+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58867+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
58868+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
58869+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
58870+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
58871+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
58872+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
58873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58875+4 4 4 4 4 4
58876+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
58877+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
58878+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
58879+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
58880+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58881+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58882+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
58883+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
58884+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
58885+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
58886+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
58887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58889+4 4 4 4 4 4
58890+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
58891+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
58892+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
58893+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
58894+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58895+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
58896+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
58897+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
58898+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
58899+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
58900+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58903+4 4 4 4 4 4
58904+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
58905+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
58906+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
58907+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
58908+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
58909+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
58910+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
58911+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
58912+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
58913+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
58914+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
58915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58917+4 4 4 4 4 4
58918+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
58919+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
58920+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
58921+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
58922+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
58923+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
58924+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
58925+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
58926+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
58927+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
58928+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
58929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58931+4 4 4 4 4 4
58932+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
58933+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
58934+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
58935+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
58936+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
58937+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
58938+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
58939+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
58940+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
58941+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
58942+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58945+4 4 4 4 4 4
58946+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
58947+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
58948+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
58949+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
58950+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58951+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
58952+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
58953+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
58954+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
58955+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
58956+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58959+4 4 4 4 4 4
58960+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
58961+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
58962+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
58963+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
58964+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58965+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
58966+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
58967+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
58968+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
58969+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
58970+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58973+4 4 4 4 4 4
58974+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
58975+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
58976+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
58977+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
58978+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
58979+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
58980+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
58981+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
58982+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
58983+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58984+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58987+4 4 4 4 4 4
58988+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
58989+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
58990+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
58991+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
58992+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
58993+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
58994+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
58995+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
58996+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
58997+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
58998+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59001+4 4 4 4 4 4
59002+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
59003+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
59004+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
59005+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
59006+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
59007+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
59008+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
59009+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
59010+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
59011+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
59012+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59015+4 4 4 4 4 4
59016+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
59017+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
59018+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
59019+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
59020+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
59021+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
59022+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
59023+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
59024+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
59025+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
59026+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59029+4 4 4 4 4 4
59030+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
59031+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
59032+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
59033+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
59034+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
59035+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
59036+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
59037+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
59038+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
59039+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
59040+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59043+4 4 4 4 4 4
59044+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
59045+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
59046+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
59047+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
59048+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
59049+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
59050+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
59051+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
59052+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
59053+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
59054+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59057+4 4 4 4 4 4
59058+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
59059+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
59060+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
59061+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
59062+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
59063+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
59064+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
59065+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
59066+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
59067+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
59068+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59071+4 4 4 4 4 4
59072+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
59073+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
59074+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
59075+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
59076+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
59077+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
59078+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
59079+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
59080+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
59081+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
59082+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59085+4 4 4 4 4 4
59086+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
59087+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
59088+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
59089+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
59090+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
59091+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
59092+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
59093+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
59094+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
59095+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
59096+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59099+4 4 4 4 4 4
59100+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
59101+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
59102+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
59103+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
59104+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
59105+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
59106+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
59107+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
59108+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
59109+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
59110+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59113+4 4 4 4 4 4
59114+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
59115+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
59116+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
59117+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
59118+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
59119+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
59120+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
59121+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
59122+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
59123+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
59124+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59127+4 4 4 4 4 4
59128+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
59129+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
59130+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
59131+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
59132+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
59133+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
59134+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
59135+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
59136+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
59137+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
59138+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59141+4 4 4 4 4 4
59142+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
59143+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
59144+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
59145+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
59146+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
59147+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
59148+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
59149+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
59150+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
59151+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
59152+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59155+4 4 4 4 4 4
59156+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
59157+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
59158+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
59159+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
59160+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
59161+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
59162+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
59163+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
59164+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
59165+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
59166+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59169+4 4 4 4 4 4
59170+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
59171+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
59172+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
59173+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
59174+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
59175+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
59176+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
59177+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
59178+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
59179+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
59180+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59183+4 4 4 4 4 4
59184+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
59185+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
59186+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
59187+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
59188+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
59189+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
59190+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
59191+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
59192+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
59193+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
59194+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59196+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59197+4 4 4 4 4 4
59198+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
59199+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
59200+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
59201+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
59202+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
59203+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
59204+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
59205+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
59206+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
59207+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
59208+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59210+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59211+4 4 4 4 4 4
59212+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
59213+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
59214+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
59215+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
59216+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
59217+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
59218+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59219+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
59220+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
59221+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
59222+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
59223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59225+4 4 4 4 4 4
59226+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
59227+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
59228+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
59229+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
59230+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
59231+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
59232+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
59233+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
59234+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
59235+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
59236+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59239+4 4 4 4 4 4
59240+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
59241+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
59242+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
59243+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
59244+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
59245+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
59246+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
59247+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
59248+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
59249+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
59250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59253+4 4 4 4 4 4
59254+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
59255+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
59256+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
59257+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
59258+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
59259+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
59260+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
59261+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
59262+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
59263+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
59264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59267+4 4 4 4 4 4
59268+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
59269+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
59270+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
59271+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
59272+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
59273+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
59274+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
59275+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
59276+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
59277+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
59278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59281+4 4 4 4 4 4
59282+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
59283+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
59284+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
59285+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
59286+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
59287+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
59288+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
59289+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
59290+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
59291+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
59292+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59295+4 4 4 4 4 4
59296+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
59297+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
59298+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
59299+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
59300+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
59301+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
59302+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
59303+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
59304+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
59305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59306+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59307+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59309+4 4 4 4 4 4
59310+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
59311+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
59312+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
59313+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
59314+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
59315+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
59316+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
59317+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
59318+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
59319+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59320+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59323+4 4 4 4 4 4
59324+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
59325+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
59326+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
59327+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
59328+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
59329+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
59330+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
59331+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
59332+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59333+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59334+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59335+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59337+4 4 4 4 4 4
59338+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
59339+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
59340+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
59341+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
59342+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
59343+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
59344+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
59345+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
59346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59347+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59348+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59351+4 4 4 4 4 4
59352+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
59353+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
59354+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
59355+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
59356+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
59357+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
59358+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
59359+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
59360+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59361+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59362+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59363+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59364+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59365+4 4 4 4 4 4
59366+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
59367+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
59368+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
59369+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
59370+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
59371+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
59372+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
59373+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
59374+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59375+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59376+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59377+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59378+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59379+4 4 4 4 4 4
59380+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59381+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
59382+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
59383+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
59384+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
59385+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
59386+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
59387+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
59388+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59389+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59390+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59391+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59392+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59393+4 4 4 4 4 4
59394+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59395+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
59396+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
59397+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
59398+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
59399+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
59400+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
59401+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
59402+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59403+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59404+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59406+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59407+4 4 4 4 4 4
59408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59409+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59410+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
59411+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
59412+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
59413+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
59414+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
59415+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
59416+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59417+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59418+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59420+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59421+4 4 4 4 4 4
59422+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59423+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59424+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
59425+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
59426+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
59427+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
59428+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
59429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59431+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59432+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59434+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59435+4 4 4 4 4 4
59436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59437+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59438+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59439+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
59440+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
59441+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
59442+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
59443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59445+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59446+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59448+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59449+4 4 4 4 4 4
59450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59451+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59452+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59453+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
59454+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
59455+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
59456+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
59457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59458+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59459+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59460+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59462+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59463+4 4 4 4 4 4
59464+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59465+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59466+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59467+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
59468+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
59469+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
59470+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
59471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59472+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59473+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59474+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59475+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59476+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59477+4 4 4 4 4 4
59478+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59479+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59480+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59481+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
59482+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
59483+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
59484+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
59485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59486+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59487+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59488+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59489+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59490+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59491+4 4 4 4 4 4
59492+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59493+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59494+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59495+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59496+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
59497+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
59498+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
59499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59500+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59501+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59502+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59504+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59505+4 4 4 4 4 4
59506+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59507+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59508+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59509+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59510+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
59511+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
59512+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59514+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59515+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59516+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59517+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59518+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59519+4 4 4 4 4 4
59520+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59521+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59522+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59523+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59524+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
59525+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
59526+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59527+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59528+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59529+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59530+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59531+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59532+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59533+4 4 4 4 4 4
59534+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59535+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59536+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59537+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59538+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
59539+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
59540+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59541+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59542+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59543+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59544+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59545+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59546+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
59547+4 4 4 4 4 4
59548diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
59549index 3838795..0d48d61 100644
59550--- a/drivers/xen/events/events_base.c
59551+++ b/drivers/xen/events/events_base.c
59552@@ -1568,7 +1568,7 @@ void xen_irq_resume(void)
59553 restore_pirqs();
59554 }
59555
59556-static struct irq_chip xen_dynamic_chip __read_mostly = {
59557+static struct irq_chip xen_dynamic_chip = {
59558 .name = "xen-dyn",
59559
59560 .irq_disable = disable_dynirq,
59561@@ -1582,7 +1582,7 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
59562 .irq_retrigger = retrigger_dynirq,
59563 };
59564
59565-static struct irq_chip xen_pirq_chip __read_mostly = {
59566+static struct irq_chip xen_pirq_chip = {
59567 .name = "xen-pirq",
59568
59569 .irq_startup = startup_pirq,
59570@@ -1602,7 +1602,7 @@ static struct irq_chip xen_pirq_chip __read_mostly = {
59571 .irq_retrigger = retrigger_dynirq,
59572 };
59573
59574-static struct irq_chip xen_percpu_chip __read_mostly = {
59575+static struct irq_chip xen_percpu_chip = {
59576 .name = "xen-percpu",
59577
59578 .irq_disable = disable_dynirq,
59579diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
59580index fef20db..d28b1ab 100644
59581--- a/drivers/xen/xenfs/xenstored.c
59582+++ b/drivers/xen/xenfs/xenstored.c
59583@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
59584 static int xsd_kva_open(struct inode *inode, struct file *file)
59585 {
59586 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
59587+#ifdef CONFIG_GRKERNSEC_HIDESYM
59588+ NULL);
59589+#else
59590 xen_store_interface);
59591+#endif
59592+
59593 if (!file->private_data)
59594 return -ENOMEM;
59595 return 0;
59596diff --git a/firmware/Makefile b/firmware/Makefile
59597index e297e1b..6900c31 100644
59598--- a/firmware/Makefile
59599+++ b/firmware/Makefile
59600@@ -35,6 +35,7 @@ fw-shipped-$(CONFIG_BNX2X) += bnx2x/bnx2x-e1-6.2.9.0.fw \
59601 bnx2x/bnx2x-e1h-6.2.9.0.fw \
59602 bnx2x/bnx2x-e2-6.2.9.0.fw
59603 fw-shipped-$(CONFIG_BNX2) += bnx2/bnx2-mips-09-6.2.1a.fw \
59604+ bnx2/bnx2-mips-09-6.2.1b.fw \
59605 bnx2/bnx2-rv2p-09-6.0.17.fw \
59606 bnx2/bnx2-rv2p-09ax-6.0.17.fw \
59607 bnx2/bnx2-mips-06-6.2.1.fw \
59608diff --git a/firmware/WHENCE b/firmware/WHENCE
59609index 0c4d96d..7563083 100644
59610--- a/firmware/WHENCE
59611+++ b/firmware/WHENCE
59612@@ -655,19 +655,20 @@ Driver: BNX2 - Broadcom NetXtremeII
59613 File: bnx2/bnx2-mips-06-6.2.1.fw
59614 File: bnx2/bnx2-rv2p-06-6.0.15.fw
59615 File: bnx2/bnx2-mips-09-6.2.1a.fw
59616+File: bnx2/bnx2-mips-09-6.2.1b.fw
59617 File: bnx2/bnx2-rv2p-09-6.0.17.fw
59618 File: bnx2/bnx2-rv2p-09ax-6.0.17.fw
59619
59620 Licence:
59621-
59622- This file contains firmware data derived from proprietary unpublished
59623- source code, Copyright (c) 2004 - 2010 Broadcom Corporation.
59624-
59625- Permission is hereby granted for the distribution of this firmware data
59626- in hexadecimal or equivalent format, provided this copyright notice is
59627- accompanying it.
59628-
59629-Found in hex form in kernel source.
59630+
59631+ This file contains firmware data derived from proprietary unpublished
59632+ source code, Copyright (c) 2004 - 2010 Broadcom Corporation.
59633+
59634+ Permission is hereby granted for the distribution of this firmware data
59635+ in hexadecimal or equivalent format, provided this copyright notice is
59636+ accompanying it.
59637+
59638+Found in hex form in kernel source.
59639
59640 --------------------------------------------------------------------------
59641
59642diff --git a/firmware/bnx2/bnx2-mips-09-6.2.1b.fw.ihex b/firmware/bnx2/bnx2-mips-09-6.2.1b.fw.ihex
59643new file mode 100644
59644index 0000000..43d7c4f
59645--- /dev/null
59646+++ b/firmware/bnx2/bnx2-mips-09-6.2.1b.fw.ihex
59647@@ -0,0 +1,6496 @@
59648+:10000000080001180800000000005594000000C816
59649+:1000100000000000000000000000000008005594EF
59650+:10002000000000380000565C080000A00800000036
59651+:100030000000574400005694080059200000008436
59652+:100040000000ADD808005744000001C00000AE5CBD
59653+:100050000800321008000000000092580000B01C98
59654+:10006000000000000000000000000000080092589E
59655+:100070000000033C000142740800049008000400E2
59656+:10008000000012FC000145B000000000000000006C
59657+:1000900000000000080016FC00000004000158AC3D
59658+:1000A000080000A80800000000003D00000158B052
59659+:1000B00000000000000000000000000008003D00FB
59660+:1000C00000000030000195B00A000046000000006A
59661+:1000D000000000000000000D636F6D362E322E31DF
59662+:1000E00062000000060201020000000000000003A0
59663+:1000F000000000C800000032000000030000000003
59664+:1001000000000000000000000000000000000000EF
59665+:1001100000000010000001360000EA600000000549
59666+:1001200000000000000000000000000000000008C7
59667+:1001300000000000000000000000000000000000BF
59668+:1001400000000000000000000000000000000000AF
59669+:10015000000000000000000000000000000000009F
59670+:10016000000000020000000000000000000000008D
59671+:10017000000000000000000000000000000000007F
59672+:10018000000000000000000000000010000000005F
59673+:10019000000000000000000000000000000000005F
59674+:1001A000000000000000000000000000000000004F
59675+:1001B000000000000000000000000000000000003F
59676+:1001C000000000000000000000000000000000002F
59677+:1001D000000000000000000000000000000000001F
59678+:1001E0000000000010000003000000000000000DEF
59679+:1001F0000000000D3C020800244256083C030800A1
59680+:1002000024635754AC4000000043202B1480FFFDB2
59681+:10021000244200043C1D080037BD9FFC03A0F021D0
59682+:100220003C100800261001183C1C0800279C5608AA
59683+:100230000E000256000000000000000D27BDFFB4B4
59684+:10024000AFA10000AFA20004AFA30008AFA4000C50
59685+:10025000AFA50010AFA60014AFA70018AFA8001CF0
59686+:10026000AFA90020AFAA0024AFAB0028AFAC002C90
59687+:10027000AFAD0030AFAE0034AFAF0038AFB8003C28
59688+:10028000AFB90040AFBC0044AFBF00480E001544FA
59689+:10029000000000008FBF00488FBC00448FB90040B1
59690+:1002A0008FB8003C8FAF00388FAE00348FAD003078
59691+:1002B0008FAC002C8FAB00288FAA00248FA90020C0
59692+:1002C0008FA8001C8FA700188FA600148FA5001000
59693+:1002D0008FA4000C8FA300088FA200048FA1000040
59694+:1002E00027BD004C3C1B60108F7A5030377B502864
59695+:1002F00003400008AF7A00008F82002427BDFFE092
59696+:10030000AFB00010AFBF0018AFB100148C42000CAA
59697+:100310003C1080008E110100104000348FBF001887
59698+:100320000E000D84000000008F85002024047FFF54
59699+:100330000091202BACB100008E030104960201084D
59700+:1003400000031C003042FFFF00621825ACA300042C
59701+:100350009202010A96030114304200FF3063FFFF4E
59702+:100360000002140000431025ACA200089603010C03
59703+:100370009602010E00031C003042FFFF00621825A8
59704+:10038000ACA3000C960301109602011200031C009E
59705+:100390003042FFFF00621825ACA300108E02011846
59706+:1003A000ACA200148E02011CACA20018148000083C
59707+:1003B0008F820024978200003C0420050044182509
59708+:1003C00024420001ACA3001C0A0000C6A782000062
59709+:1003D0003C0340189442001E00431025ACA2001CB0
59710+:1003E0000E000DB8240400018FBF00188FB1001457
59711+:1003F0008FB000100000102103E0000827BD00208E
59712+:100400003C0780008CE202B834E50100044100089A
59713+:10041000240300013C0208008C42006024420001D9
59714+:100420003C010800AC22006003E0000800601021DD
59715+:100430003C0208008C42005C8CA4002094A30016AF
59716+:100440008CA6000494A5000E24420001ACE40280B6
59717+:100450002463FFFC3C010800AC22005C3C0210005D
59718+:10046000A4E30284A4E5028600001821ACE6028819
59719+:10047000ACE202B803E000080060102127BDFFE0F5
59720+:100480003C028000AFB0001034420100AFBF001C3E
59721+:10049000AFB20018AFB100148C43000094450008BF
59722+:1004A0002462FE002C42038110400003000381C23D
59723+:1004B0000A00010226100004240201001462000553
59724+:1004C0003C1180003C02800890420004305000FF44
59725+:1004D0003C11800036320100964300143202000FB6
59726+:1004E00000021500004310253C0308008C63004403
59727+:1004F00030A40004AE220080246300013C01080007
59728+:10050000AC2300441080000730A200028FBF001C03
59729+:100510008FB200188FB100148FB000100A0000CE07
59730+:1005200027BD00201040002D0000182130A20080BF
59731+:1005300010400005362200708E44001C0E000C672F
59732+:10054000240500A0362200708C4400008F82000C2D
59733+:10055000008210232C43012C10600004AF82001095
59734+:10056000240300010A000145AF84000C8E42000400
59735+:100570003C036020AF84000CAC6200143C02080015
59736+:100580008C42005850400015000018218C62000475
59737+:10059000240301FE304203FF144300100000182121
59738+:1005A0002E020004104000032E0200080A00014041
59739+:1005B0000000802114400003000000000A000140F8
59740+:1005C0002610FFF90000000D2402000202021004B0
59741+:1005D0003C036000AC626914000018218FBF001C4E
59742+:1005E0008FB200188FB100148FB00010006010217E
59743+:1005F00003E0000827BD00203C0480008C8301003C
59744+:1006000024020100506200033C0280080000000D3B
59745+:100610003C02800890430004000010213063000F6A
59746+:1006200000031D0003E00008AC8300800004188074
59747+:100630002782FF9C00621821000410C00044102390
59748+:100640008C640000000210C03C030800246356E4E0
59749+:10065000004310213C038000AC64009003E00008DC
59750+:10066000AF8200243C0208008C42011410400019A3
59751+:100670003084400030A2007F000231C03C02020002
59752+:100680001080001400A218253C026020AC43001426
59753+:100690003C0408008C8456B83C0308008C630110AD
59754+:1006A0003C02800024050900AC4500200086202182
59755+:1006B000246300013C028008AC4400643C01080053
59756+:1006C000AC2301103C010800AC2456B803E000083C
59757+:1006D000000000003C02602003E00008AC4500146C
59758+:1006E00003E000080000102103E0000800001021D2
59759+:1006F00030A2000810400008240201003C0208005B
59760+:100700008C42010C244200013C010800AC22010C87
59761+:1007100003E0000800000000148200080000000050
59762+:100720003C0208008C4200FC244200013C0108000D
59763+:10073000AC2200FC0A0001A330A200203C02080009
59764+:100740008C420084244200013C010800AC22008459
59765+:1007500030A200201040000830A200103C02080027
59766+:100760008C420108244200013C010800AC2201082F
59767+:1007700003E0000800000000104000080000000036
59768+:100780003C0208008C420104244200013C010800A4
59769+:10079000AC22010403E00008000000003C02080055
59770+:1007A0008C420100244200013C010800AC220100FF
59771+:1007B00003E000080000000027BDFFE0AFB1001417
59772+:1007C0003C118000AFB20018AFBF001CAFB00010EA
59773+:1007D0003632010096500008320200041040000733
59774+:1007E000320300028FBF001C8FB200188FB10014BB
59775+:1007F0008FB000100A0000CE27BD00201060000B53
59776+:10080000020028218E2401000E00018A0000000051
59777+:100810003202008010400003240500A10E000C6786
59778+:100820008E44001C0A0001E3240200018E2301040F
59779+:100830008F82000810430006020028218E24010048
59780+:100840000E00018A000000008E220104AF82000821
59781+:10085000000010218FBF001C8FB200188FB1001450
59782+:100860008FB0001003E0000827BD00202C82000498
59783+:1008700014400002000018212483FFFD240200021E
59784+:10088000006210043C03600003E00008AC626914DD
59785+:1008900027BDFFE0AFBF001CAFB20018AFB100141E
59786+:1008A000AFB000103C048000948201083043700017
59787+:1008B000240220001062000A2862200154400052E5
59788+:1008C0008FBF001C24024000106200482402600018
59789+:1008D0001062004A8FBF001C0A0002518FB200183C
59790+:1008E00034820100904300098C5000189451000C90
59791+:1008F000240200091062001C0000902128620009F7
59792+:10090000144000218F8200242402000A5062001249
59793+:10091000323100FF2402000B1062000F00000000C3
59794+:100920002402000C146200188F8200243C0208008C
59795+:100930008C4256B824030900AC83002000501021DB
59796+:100940003C038008AC6200643C010800AC2256B84D
59797+:100950000A0002508FBF001C0E0001E900102602A1
59798+:100960000A0002308F8200240E0001E900102602E6
59799+:100970003C0380089462001A8C72000C3042FFFF26
59800+:10098000020280258F8200248C42000C5040001E01
59801+:100990008FBF001C0E000D84000000003C02800090
59802+:1009A00034420100944300088F82002400031C009D
59803+:1009B0009444001E8F82002000641825AC50000073
59804+:1009C00024040001AC510004AC520008AC40000CFF
59805+:1009D000AC400010AC400014AC4000180E000DB844
59806+:1009E000AC43001C0A0002508FBF001C0E000440E4
59807+:1009F000000000000A0002508FBF001C0E000C9F78
59808+:100A0000000000008FBF001C8FB200188FB10014CF
59809+:100A10008FB000100000102103E0000827BD002067
59810+:100A200027BDFFD8AFB400203C036010AFBF002447
59811+:100A3000AFB3001CAFB20018AFB10014AFB00010DC
59812+:100A40008C6450002402FF7F3C1408002694563822
59813+:100A5000008220243484380CAC6450003C028000B6
59814+:100A6000240300370E0014B0AC4300083C07080014
59815+:100A700024E70618028010212404001D2484FFFFAF
59816+:100A8000AC4700000481FFFD244200043C02080042
59817+:100A9000244207C83C010800AC2256403C02080032
59818+:100AA000244202303C030800246306203C04080072
59819+:100AB000248403B43C05080024A506F03C06080085
59820+:100AC00024C62C9C3C010800AC2256803C02080045
59821+:100AD000244205303C010800AC2756843C01080044
59822+:100AE000AC2656943C010800AC23569C3C010800FF
59823+:100AF000AC2456A03C010800AC2556A43C010800DB
59824+:100B0000AC2256A83C010800AC23563C3C0108002E
59825+:100B1000AC2456443C010800AC2056603C0108005F
59826+:100B2000AC2556643C010800AC2056703C0108001E
59827+:100B3000AC27567C3C010800AC2656903C010800CE
59828+:100B4000AC2356980E00056E00000000AF80000C2C
59829+:100B50003C0280008C5300008F8300043C0208009C
59830+:100B60008C420020106200213262000700008821C0
59831+:100B70002792FF9C3C100800261056E43C02080017
59832+:100B80008C42002024050001022518040043202483
59833+:100B90008F820004004310245044000C26310001D1
59834+:100BA00010800008AF9000248E4300003C028000BB
59835+:100BB000AC4300900E000D4BAE05000C0A0002C1C4
59836+:100BC00026310001AE00000C263100012E22000269
59837+:100BD000261000381440FFE9265200043C020800A9
59838+:100BE0008C420020AF820004326200071040FFD91F
59839+:100BF0003C028000326200011040002D326200028F
59840+:100C00003C0580008CA2010000002021ACA2002045
59841+:100C10008CA301042C42078110400008ACA300A85B
59842+:100C200094A2010824032000304270001443000302
59843+:100C30003C02800890420005304400FF0E0001593C
59844+:100C4000000000003C0280009042010B304300FF96
59845+:100C50002C62001E54400004000310800E00018628
59846+:100C60000A0002EC00000000005410218C42000039
59847+:100C70000040F80900000000104000043C02800021
59848+:100C80008C4301043C026020AC4300143C02080089
59849+:100C90008C4200343C0440003C03800024420001AC
59850+:100CA000AC6401383C010800AC220034326200021E
59851+:100CB00010400010326200043C1080008E0201409F
59852+:100CC000000020210E000159AE0200200E00038317
59853+:100CD000000000003C024000AE0201783C02080027
59854+:100CE0008C420038244200013C010800AC2200384C
59855+:100CF000326200041040FF973C0280003C108000EC
59856+:100D00008E020180000020210E000159AE02002059
59857+:100D10008E03018024020F00546200073C02800809
59858+:100D20008E0201883C0300E03042FFFF00431025A3
59859+:100D30000A000328AE020080344200809042000086
59860+:100D400024030050304200FF14430007000000005D
59861+:100D50000E000362000000001440000300000000C9
59862+:100D60000E000971000000003C0208008C42003CAB
59863+:100D70003C0440003C03800024420001AC6401B804
59864+:100D80003C010800AC22003C0A0002A33C028000A7
59865+:100D90003C02900034420001008220253C02800089
59866+:100DA000AC4400203C0380008C6200200440FFFE25
59867+:100DB0000000000003E00008000000003C0280008A
59868+:100DC000344300010083202503E00008AC440020E8
59869+:100DD00027BDFFE0AFB10014AFB000100080882144
59870+:100DE000AFBF00180E00033230B000FF8F83FF94B6
59871+:100DF000022020219062002502028025A07000259B
59872+:100E00008C7000183C0280000E00033D020280241A
59873+:100E10001600000B8FBF00183C0480008C8201F884
59874+:100E20000440FFFE348201C024030002AC510000E4
59875+:100E3000A04300043C021000AC8201F88FBF0018F0
59876+:100E40008FB100148FB0001003E0000827BD002010
59877+:100E500027BDFFE83C028000AFBF00103442018094
59878+:100E6000944300048C4400083063020010600005C5
59879+:100E7000000028210E00100C000000000A0003787A
59880+:100E8000240500013C02FF000480000700821824B2
59881+:100E90003C02040014620004240500018F82FF94C8
59882+:100EA00090420008240500018FBF001000A010210F
59883+:100EB00003E0000827BD00188F82FF982405000179
59884+:100EC000A040001A3C028000344201400A00034264
59885+:100ED0008C4400008F85FF9427BDFFE0AFBF001C4E
59886+:100EE000AFB20018AFB10014AFB0001090A2000074
59887+:100EF000304400FF38830020388200300003182B74
59888+:100F00000002102B0062182410600003240200501D
59889+:100F1000148200A88FBF001C90A20005304200017F
59890+:100F2000104000A48FBF001C3C02800034420140EE
59891+:100F3000904200082443FFFF2C6200051040009EF1
59892+:100F40008FB20018000310803C030800246355ACE6
59893+:100F5000004310218C420000004000080000000007
59894+:100F60003C028000345101400E0003328E24000008
59895+:100F70008F92FF948E2200048E50000C1602000205
59896+:100F800024020001AE42000C0E00033D8E2400003E
59897+:100F90008E220004145000068FBF001C8FB2001870
59898+:100FA0008FB100148FB000100A000F7827BD002009
59899+:100FB0008E42000C0A000419000000003C0480006E
59900+:100FC0003482014094A300108C4200043063FFFF80
59901+:100FD0001443001C0000000024020001A4A2001021
59902+:100FE0008C8202380441000F3C0380003C02003F29
59903+:100FF0003448F0003C0760003C06FFC08CE22BBC8C
59904+:1010000000461824004810240002130200031D8229
59905+:10101000106200583C0280008C8202380440FFF7C6
59906+:101020003C038000346201408C44000034620200C2
59907+:10103000AC4400003C021000AC6202380A00043BE1
59908+:101040008FBF001C94A200100A00041900000000C9
59909+:10105000240200201482000F3C0280003C03800028
59910+:1010600094A20012346301408C6300043042FFFFFD
59911+:10107000146200050000000024020001A4A2001276
59912+:101080000A0004028FBF001C94A200120A00041977
59913+:1010900000000000345101400E0003328E24000095
59914+:1010A0008F92FF948E230004964200123050FFFF6F
59915+:1010B0001603000224020001A64200120E00033DA6
59916+:1010C0008E2400008E220004160200068FBF001C32
59917+:1010D0008FB200188FB100148FB000100A00037C8B
59918+:1010E00027BD0020964200120A00041900000000EB
59919+:1010F0003C03800094A20014346301408C6300041C
59920+:101100003042FFFF14620008240200018FBF001C60
59921+:101110008FB200188FB100148FB00010A4A2001479
59922+:101120000A00146327BD002094A20014144000217B
59923+:101130008FBF001C0A000435000000003C03800043
59924+:1011400094A20016346301408C6300043042FFFF18
59925+:101150001462000D240200018FBF001C8FB2001822
59926+:101160008FB100148FB00010A4A200160A000B1457
59927+:1011700027BD00209442007824420004A4A200105D
59928+:101180000A00043B8FBF001C94A200162403000138
59929+:101190003042FFFF144300078FBF001C3C020800D1
59930+:1011A0008C420070244200013C010800AC22007017
59931+:1011B0008FBF001C8FB200188FB100148FB00010C9
59932+:1011C00003E0000827BD002027BDFFD8AFB20018FC
59933+:1011D0008F92FF94AFB10014AFBF0020AFB3001CDB
59934+:1011E000AFB000103C028000345101008C5001006F
59935+:1011F0009242000092230009304400FF2402001FA5
59936+:10120000106200AB28620020104000192402003850
59937+:101210002862000A1040000D2402000B286200081A
59938+:101220001040002E8F820024046001042862000216
59939+:101230001440002A8F820024240200061062002637
59940+:101240008FBF00200A00055F8FB3001C1062006092
59941+:101250002862000B144000FA8FBF00202402000E09
59942+:10126000106200788F8200240A00055F8FB3001C93
59943+:10127000106200D2286200391040000A2402008067
59944+:1012800024020036106200E528620037104000C3D7
59945+:1012900024020035106200D98FBF00200A00055FCC
59946+:1012A0008FB3001C1062002D2862008110400006E0
59947+:1012B000240200C824020039106200C98FBF002038
59948+:1012C0000A00055F8FB3001C106200A28FBF0020D0
59949+:1012D0000A00055F8FB3001C8F8200248C42000C33
59950+:1012E000104000D78FBF00200E000D8400000000CA
59951+:1012F0003C038000346301008C6200008F85002075
59952+:10130000946700089466000CACA200008C64000492
59953+:101310008F82002400063400ACA400049448001E10
59954+:101320008C62001800073C0000E83825ACA20008D9
59955+:101330008C62001C24040001ACA2000C9062000A24
59956+:1013400000C23025ACA60010ACA00014ACA0001860
59957+:10135000ACA7001C0A00051D8FBF00208F8200244F
59958+:101360008C42000C104000B68FBF00200E000D8490
59959+:10137000000000008F820024962400089625000CAF
59960+:101380009443001E000422029626000E8F82002045
59961+:10139000000426000083202500052C003C0300806B
59962+:1013A00000A6282500832025AC400000AC400004A6
59963+:1013B000AC400008AC40000CAC450010AC40001440
59964+:1013C000AC400018AC44001C0A00051C24040001B9
59965+:1013D0009622000C14400018000000009242000504
59966+:1013E0003042001014400014000000000E000332D0
59967+:1013F0000200202192420005020020213442001008
59968+:101400000E00033DA242000592420000240300208A
59969+:10141000304200FF10430089020020218FBF0020CE
59970+:101420008FB3001C8FB200188FB100148FB0001062
59971+:101430000A00107527BD00280000000D0A00055E97
59972+:101440008FBF00208C42000C1040007D8FBF002019
59973+:101450000E000D84000000008E2200048F84002006
59974+:101460009623000CAC8200003C0280089445002CBE
59975+:101470008F82002400031C0030A5FFFF9446001E4D
59976+:101480003C02400E0065182500C23025AC830004E4
59977+:10149000AC800008AC80000CAC800010AC80001464
59978+:1014A000AC800018AC86001C0A00051C2404000156
59979+:1014B0000E000332020020218F93FF9802002021AA
59980+:1014C0000E00033DA660000C020020210E00034226
59981+:1014D000240500018F8200248C42000C104000582B
59982+:1014E0008FBF00200E000D84000000009622000C2B
59983+:1014F0008F83002000021400AC700000AC62000476
59984+:10150000AC6000088E4400388F820024AC64000C6C
59985+:101510008E46003C9445001E3C02401FAC66001005
59986+:1015200000A228258E62000424040001AC6200148D
59987+:10153000AC600018AC65001C8FBF00208FB3001C8E
59988+:101540008FB200188FB100148FB000100A000DB8D0
59989+:1015500027BD0028240200201082003A8FB3001C0F
59990+:101560000E000F5E00000000104000358FBF00200D
59991+:101570003C0480008C8201F80440FFFE348201C0EC
59992+:1015800024030002AC500000A04300043C02100001
59993+:10159000AC8201F80A00055E8FBF00200200202106
59994+:1015A0008FBF00208FB3001C8FB200188FB10014C2
59995+:1015B0008FB000100A000EA727BD00289625000C4A
59996+:1015C000020020218FBF00208FB3001C8FB20018B3
59997+:1015D0008FB100148FB000100A000ECC27BD002878
59998+:1015E000020020218FB3001C8FB200188FB10014AD
59999+:1015F0008FB000100A000EF727BD00289225000DBD
60000+:10160000020020218FB3001C8FB200188FB100148C
60001+:101610008FB000100A000F4827BD002802002021CB
60002+:101620008FBF00208FB3001C8FB200188FB1001441
60003+:101630008FB000100A000F1F27BD00288FBF0020A9
60004+:101640008FB3001C8FB200188FB100148FB0001040
60005+:1016500003E0000827BD00283C0580008CA202782A
60006+:101660000440FFFE34A2024024030002AC44000008
60007+:10167000A04300043C02100003E00008ACA2027882
60008+:10168000A380001803E00008A38000193C03800039
60009+:101690008C6202780440FFFE8F82001CAC62024024
60010+:1016A00024020002A06202443C02100003E0000891
60011+:1016B000AC6202783C02600003E000088C425404F3
60012+:1016C0009083003024020005008040213063003FF9
60013+:1016D0000000482114620005000050219082004C57
60014+:1016E0009483004E304900FF306AFFFFAD00000CCC
60015+:1016F000AD000010AD000024950200148D05001C03
60016+:101700008D0400183042FFFF004910230002110031
60017+:10171000000237C3004038210086202300A2102B8E
60018+:101720000082202300A72823AD05001CAD0400186B
60019+:10173000A5090014A5090020A50A001603E0000869
60020+:10174000A50A002203E000080000000027BDFFD822
60021+:10175000AFB200183C128008AFB40020AFB3001C39
60022+:10176000AFB10014AFBF0024AFB00010365101007C
60023+:101770003C0260008C4254049222000C3C1408008D
60024+:10178000929400F7304300FF2402000110620032FF
60025+:101790000080982124020002146200353650008037
60026+:1017A0000E00143D000000009202004C2403FF8054
60027+:1017B0003C0480003042007F000211C024420240FD
60028+:1017C0000262102100431824AC8300949245000863
60029+:1017D0009204004C3042007F3C03800614850007D1
60030+:1017E000004380212402FFFFA22200112402FFFFF8
60031+:1017F000A62200120A0005D22402FFFF9602002052
60032+:10180000A222001196020022A62200128E020024BB
60033+:101810003C048008AE2200143485008090A2004C65
60034+:1018200034830100A06200108CA2003CAC6200185E
60035+:101830008C820068AC6200F48C820064AC6200F0C0
60036+:101840008C82006CAC6200F824020001A0A2006847
60037+:101850000A0005EE3C0480080E001456000000004B
60038+:1018600036420080A04000680A0005EE3C04800873
60039+:10187000A2000068A20000690A0006293C02800854
60040+:10188000348300808C62003834850100AC62006CC7
60041+:1018900024020001A062006990A200D59083000894
60042+:1018A000305100FF3072007F12320019001111C058
60043+:1018B00024420240026210212403FF8000431824C6
60044+:1018C0003C048000AC8300943042007F3C038006DF
60045+:1018D000004380218E02000C1040000D02002021E8
60046+:1018E0000E00057E0000000026220001305100FF9E
60047+:1018F0009203003C023410260002102B0002102339
60048+:101900003063007F022288240A0005F8A203003C0D
60049+:101910003C088008350401008C8200E03507008017
60050+:10192000ACE2003C8C8200E0AD02000090E5004C8F
60051+:10193000908600D590E3004C908400D52402FF806F
60052+:1019400000A228243063007F308400FF00A62825F1
60053+:101950000064182A1060000230A500FF38A500803E
60054+:10196000A0E5004CA10500093C0280089043000E50
60055+:10197000344400803C058000A043000A8C8300189A
60056+:101980003C027FFF3442FFFF00621824AC83001842
60057+:101990008CA201F80440FFFE00000000ACB301C0BF
60058+:1019A0008FBF00248FB400208FB3001C8FB20018AB
60059+:1019B0008FB100148FB0001024020002A0A201C455
60060+:1019C00027BD00283C02100003E00008ACA201F88B
60061+:1019D00090A2000024420001A0A200003C030800E5
60062+:1019E0008C6300F4304200FF144300020080302179
60063+:1019F000A0A0000090A200008F84001C000211C073
60064+:101A00002442024024830040008220212402FF80DF
60065+:101A1000008220243063007F3C02800A006218218B
60066+:101A20003C028000AC44002403E00008ACC300008A
60067+:101A300094820006908300058C85000C8C86001033
60068+:101A40008C8700188C88001C8C8400203C010800C6
60069+:101A5000A42256C63C010800A02356C53C0108003C
60070+:101A6000AC2556CC3C010800AC2656D03C01080001
60071+:101A7000AC2756D83C010800AC2856DC3C010800D5
60072+:101A8000AC2456E003E00008000000003C0280089F
60073+:101A9000344201008C4400343C038000346504006F
60074+:101AA000AC6400388C420038AF850028AC62003C42
60075+:101AB0003C020005AC6200300000000000000000A5
60076+:101AC00003E00008000000003C020006308400FF34
60077+:101AD000008220253C028000AC4400300000000061
60078+:101AE00000000000000000003C0380008C62000049
60079+:101AF000304200101040FFFD3462040003E0000893
60080+:101B0000AF82002894C200003C080800950800CA73
60081+:101B100030E7FFFF0080482101021021A4C200002D
60082+:101B200094C200003042FFFF00E2102B544000013D
60083+:101B3000A4C7000094A200003C0308008C6300CC02
60084+:101B400024420001A4A2000094A200003042FFFF42
60085+:101B5000144300073C0280080107102BA4A00000DA
60086+:101B60005440000101003821A4C700003C02800855
60087+:101B7000344601008CC3002894A200003C0480007D
60088+:101B80003042FFFE000210C000621021AC82003C17
60089+:101B90008C82003C006218231860000400000000E2
60090+:101BA0008CC200240A0006BA244200018CC2002420
60091+:101BB000AC8200383C020050344200103C038000EC
60092+:101BC000AC620030000000000000000000000000D7
60093+:101BD0008C620000304200201040FFFD0000000039
60094+:101BE00094A200003C04800030420001000210C0BA
60095+:101BF000004410218C430400AD2300008C420404F7
60096+:101C0000AD2200043C02002003E00008AC8200305A
60097+:101C100027BDFFE0AFB20018AFB10014AFB00010A5
60098+:101C2000AFBF001C94C2000000C080213C1208001D
60099+:101C3000965200C624420001A6020000960300004E
60100+:101C400094E2000000E03021144300058FB1003021
60101+:101C50000E00068F024038210A0006F10000000045
60102+:101C60008C8300048C82000424420040046100073D
60103+:101C7000AC8200048C8200040440000400000000D8
60104+:101C80008C82000024420001AC8200009602000019
60105+:101C90003042FFFF50520001A600000096220000D3
60106+:101CA00024420001A62200003C02800834420100C8
60107+:101CB000962300009442003C144300048FBF001C94
60108+:101CC00024020001A62200008FBF001C8FB2001862
60109+:101CD0008FB100148FB0001003E0000827BD002072
60110+:101CE00027BDFFE03C028008AFBF0018344201006E
60111+:101CF0008C4800343C03800034690400AC68003830
60112+:101D00008C42003830E700FFAF890028AC62003C0D
60113+:101D10003C020005AC620030000000000000000042
60114+:101D200000000000000000000000000000000000B3
60115+:101D30008C82000C8C82000C97830016AD22000070
60116+:101D40008C82001000604021AD2200048C820018BB
60117+:101D5000AD2200088C82001CAD22000C8CA2001465
60118+:101D6000AD2200108C820020AD220014908200056C
60119+:101D7000304200FF00021200AD2200188CA20018B1
60120+:101D8000AD22001C8CA2000CAD2200208CA2001001
60121+:101D9000AD2200248CA2001CAD2200288CA20020C1
60122+:101DA000AD22002C3402FFFFAD260030AD20003400
60123+:101DB000506200013408FFFFAD28003850E00011E8
60124+:101DC0003C0280083C048008348401009482005066
60125+:101DD0003042FFFFAD22003C9483004494850044D0
60126+:101DE000240200013063FFFF000318C200641821C1
60127+:101DF0009064006430A5000700A210040A00075C8C
60128+:101E00000044102534420100AD20003C94430044BE
60129+:101E1000944400443063FFFF000318C2006218219D
60130+:101E200030840007906500642402000100821004E1
60131+:101E30000002102700451024A0620064000000008A
60132+:101E400000000000000000003C0200063442004098
60133+:101E50003C038000AC620030000000000000000085
60134+:101E6000000000008C620000304200101040FFFDB6
60135+:101E70003C06800834C201503463040034C7014A70
60136+:101E800034C4013434C5014034C60144AFA200104B
60137+:101E90000E0006D2AF8300288FBF001803E00008B1
60138+:101EA00027BD00208F8300143C0608008CC600E884
60139+:101EB0008F82001C30633FFF000319800046102111
60140+:101EC000004310212403FF80004318243C068000B7
60141+:101ED000ACC300283042007F3C03800C004330211B
60142+:101EE00090C2000D30A500FF0000382134420010E0
60143+:101EF000A0C2000D8F8900143C028008344201000A
60144+:101F00009443004400091382304800032402000176
60145+:101F1000A4C3000E1102000B2902000210400005AC
60146+:101F2000240200021100000C240300010A0007A48F
60147+:101F30000000182111020006000000000A0007A49A
60148+:101F4000000018218CC2002C0A0007A424430001C1
60149+:101F50008CC20014244300018CC200180043102BD3
60150+:101F60005040000A240700012402002714A20003A5
60151+:101F70003C0380080A0007B1240700013463010014
60152+:101F80009462004C24420001A462004C00091382B8
60153+:101F9000304300032C620002104000090080282119
60154+:101FA000146000040000000094C200340A0007C15D
60155+:101FB0003046FFFF8CC600380A0007C10080282188
60156+:101FC000000030213C040800248456C00A000706A3
60157+:101FD0000000000027BDFF90AFB60068AFB50064F9
60158+:101FE000AFB40060AFB3005CAFB20058AFB1005403
60159+:101FF000AFBF006CAFB000508C9000000080B021EB
60160+:102000003C0208008C4200E8960400328F83001CDA
60161+:102010002414FF8030843FFF0062182100042180D7
60162+:1020200000641821007410243C13800000A090214B
60163+:1020300090A50000AE620028920400323C02800CA1
60164+:102040003063007F00628821308400C02402004099
60165+:10205000148200320000A8218E3500388E2200182C
60166+:102060001440000224020001AE2200189202003C3B
60167+:10207000304200201440000E8F83001C000511C068
60168+:102080002442024000621821306400783C02008043
60169+:102090000082202500741824AE630800AE64081086
60170+:1020A0008E2200188E03000800431021AE22001873
60171+:1020B0008E22002C8E230018244200010062182B6F
60172+:1020C0001060004300000000924200002442000122
60173+:1020D000A24200003C0308008C6300F4304200FF81
60174+:1020E00050430001A2400000924200008F84001C77
60175+:1020F000000211C024420240248300403063007F6C
60176+:10210000008220213C02800A0094202400621821D1
60177+:10211000AE6400240A0008D2AEC30000920300326D
60178+:102120002402FFC000431024304200FF1440000589
60179+:1021300024020001AE220018962200340A00084250
60180+:102140003055FFFF8E22001424420001AE220018F9
60181+:102150009202003000021600000216030441001C27
60182+:10216000000000009602003227A400100080282101
60183+:10217000A7A20016960200320000302124070001B9
60184+:102180003042FFFFAF8200140E000706AFA0001C14
60185+:10219000960200328F83001C3C0408008C8400E807
60186+:1021A00030423FFF000211800064182100621821B4
60187+:1021B00000741024AE62002C3063007F3C02800E5D
60188+:1021C000006218219062000D3042007FA062000D75
60189+:1021D0009222000D304200105040007892420000E0
60190+:1021E0003C028008344401009482004C8EC30000FD
60191+:1021F0003C130800967300C62442FFFFA482004CE3
60192+:10220000946200329623000E3054FFFF3070FFFFBF
60193+:102210003C0308008C6300D000701807A7A30038A7
60194+:102220009482003E3063FFFF3042FFFF14620007DC
60195+:10223000000000008C8200303C038000244200300B
60196+:10224000AC62003C0A00086A8C82002C9482004038
60197+:102250003042FFFF5462000927A400408C820038FE
60198+:102260003C03800024420030AC62003C8C8200348D
60199+:10227000AC6200380A0008793C03800027A50038CA
60200+:1022800027A60048026038210E00068FA7A000484C
60201+:102290008FA300403C02800024630030AC43003830
60202+:1022A0008FA30044AC43003C3C0380003C0200058B
60203+:1022B000AC6200303C028008344401009482004249
60204+:1022C000346304003042FFFF0202102B1440000769
60205+:1022D000AF8300289482004E9483004202021021B2
60206+:1022E000004310230A00088F3043FFFF9483004E01
60207+:1022F00094820042026318210050102300621823C8
60208+:102300003063FFFF3C028008344401009482003CAB
60209+:102310003042FFFF14430003000000000A00089F42
60210+:10232000240300019482003C3042FFFF0062102B26
60211+:10233000144000058F8200289482003C0062102324
60212+:102340003043FFFF8F820028AC550000AC400004F2
60213+:10235000AC540008AC43000C3C02000634420010B0
60214+:102360003C038000AC620030000000000000000070
60215+:10237000000000008C620000304200101040FFFDA1
60216+:102380003C04800834840100001018C20064182145
60217+:102390009065006432020007240600010046100424
60218+:1023A00000451025A0620064948300429622000E2E
60219+:1023B00050430001A386001892420000244200010D
60220+:1023C000A24200003C0308008C6300F4304200FF8E
60221+:1023D00050430001A2400000924200008F84001C84
60222+:1023E000000211C0244202402483004000822021C8
60223+:1023F0002402FF80008220243063007F3C02800A98
60224+:10240000006218213C028000AC440024AEC30000EE
60225+:102410008FBF006C8FB600688FB500648FB400600A
60226+:102420008FB3005C8FB200588FB100548FB0005052
60227+:1024300003E0000827BD007027BDFFD8AFB3001C24
60228+:10244000AFB20018AFB10014AFB00010AFBF0020A2
60229+:102450000080982100E0802130B1FFFF0E000D8444
60230+:1024600030D200FF0000000000000000000000006B
60231+:102470008F8200208F830024AC510000AC520004F6
60232+:10248000AC530008AC40000CAC400010AC40001451
60233+:10249000AC4000189463001E02038025AC50001C61
60234+:1024A0000000000000000000000000002404000103
60235+:1024B0008FBF00208FB3001C8FB200188FB10014A3
60236+:1024C0008FB000100A000DB827BD002830A5FFFF0F
60237+:1024D0000A0008DC30C600FF3C02800834430100DB
60238+:1024E0009462000E3C080800950800C63046FFFFC5
60239+:1024F00014C000043402FFFF946500EA0A000929B1
60240+:102500008F84001C10C20027000000009462004E5F
60241+:102510009464003C3045FFFF00A6102300A6182B52
60242+:102520003087FFFF106000043044FFFF00C5102318
60243+:1025300000E210233044FFFF0088102B1040000EF3
60244+:1025400000E810233C028008344401002403000109
60245+:1025500034420080A44300162402FFFFA482000E30
60246+:10256000948500EA8F84001C0000302130A5FFFF15
60247+:102570000A0009013C0760200044102A10400009AD
60248+:102580003C0280083443008094620016304200010F
60249+:10259000104000043C0280009442007E244200145B
60250+:1025A000A462001603E000080000000027BDFFE061
60251+:1025B0003C028008AFBF001CAFB0001834420100DD
60252+:1025C000944300429442004C104000193068FFFFD1
60253+:1025D0009383001824020001146200298FBF001C9D
60254+:1025E0003C06800834D00100000810C200501021C1
60255+:1025F000904200643103000734C70148304200FFB5
60256+:10260000006210073042000134C9014E34C4012C6D
60257+:1026100034C5013E1040001634C601420E0006D2F9
60258+:10262000AFA90010960200420A0009463048FFFF99
60259+:102630003C028008344401009483004494820042A8
60260+:102640001043000F8FBF001C94820044A4820042FC
60261+:1026500094820050A482004E8C820038AC820030FC
60262+:1026600094820040A482003E9482004AA4820048E2
60263+:102670008FBF001C8FB000180A00090427BD00207E
60264+:102680008FB0001803E0000827BD002027BDFFA081
60265+:10269000AFB1004C3C118000AFBF0058AFB3005445
60266+:1026A000AFB20050AFB000483626018890C2000398
60267+:1026B0003044007FA3A400108E32018090C200003D
60268+:1026C0003043007F240200031062003BAF92001CE5
60269+:1026D00028620004104000062402000424020002C4
60270+:1026E000106200098FBF00580A000B0F8FB300540F
60271+:1026F0001062004D240200051062014E8FBF005889
60272+:102700000A000B0F8FB30054000411C002421021C5
60273+:102710002404FF8024420240004410242643004049
60274+:10272000AE2200243063007F3C02800A0062182140
60275+:102730009062003CAFA3003C00441025A062003C26
60276+:102740008FA3003C9062003C304200401040016C7E
60277+:102750008FBF00583C108008A3800018361001007D
60278+:102760008E0200E08C63003427A4003C27A50010F3
60279+:10277000004310210E0007C3AE0200E093A2001038
60280+:102780003C038000A20200D58C6202780440FFFE68
60281+:102790008F82001CAC62024024020002A06202444C
60282+:1027A0003C021000AC6202780E0009390000000003
60283+:1027B0000A000B0E8FBF00583C05800890C3000133
60284+:1027C00090A2000B1443014E8FBF005834A4008028
60285+:1027D0008C8200189082004C90A200083C0260009D
60286+:1027E0008C4254048C8300183C027FFF3442FFFF6C
60287+:1027F000006218243C0208008C4200B4AC8300182C
60288+:102800003C038000244200013C010800AC2200B4DB
60289+:102810008C6201F80440FFFE8F82001CAC6201C094
60290+:102820000A000AD6240200023C10800890C300016E
60291+:102830009202000B144301328FBF005827A40018E6
60292+:1028400036050110240600033C0260008C4254044B
60293+:102850000E000E470000000027A40028360501F0F6
60294+:102860000E000E47240600038FA200283603010045
60295+:10287000AE0200648FA2002CAE0200688FA200306E
60296+:10288000AE02006C93A40018906300D52402FF8070
60297+:102890000082102400431025304900FF3084007F5F
60298+:1028A0003122007F0082102A544000013929008023
60299+:1028B000000411C0244202402403FF800242102180
60300+:1028C00000431024AE220094264200403042007F94
60301+:1028D0003C038006004340218FA3001C2402FFFF1D
60302+:1028E000AFA800403C130800927300F71062003359
60303+:1028F00093A2001995030014304400FF3063FFFFDA
60304+:102900000064182B106000100000000095040014F3
60305+:102910008D07001C8D0600183084FFFF0044202323
60306+:102920000004210000E438210000102100E4202BE5
60307+:1029300000C2302100C43021AD07001CAD060018D4
60308+:102940000A000A2F93A20019950400148D07001C99
60309+:102950008D0600183084FFFF008220230004210030
60310+:10296000000010210080182100C2302300E4202B39
60311+:1029700000C4302300E33823AD07001CAD06001867
60312+:1029800093A200198FA30040A462001497A2001A1A
60313+:10299000A46200168FA2001CAC6200108FA2001C63
60314+:1029A000AC62000C93A20019A462002097A2001A46
60315+:1029B000A46200228FA2001CAC6200243C048008A8
60316+:1029C000348300808C6200388FA20020012088218F
60317+:1029D000AC62003C8FA20020AC82000093A20018E1
60318+:1029E000A062004C93A20018A0820009A0600068B9
60319+:1029F00093A20018105100512407FF803229007F54
60320+:102A0000000911C024420240024210213046007FDA
60321+:102A10003C03800000471024AC6200943C02800616
60322+:102A200000C2302190C2003CAFA60040000020212F
60323+:102A300000471025A0C2003C8FA80040950200026C
60324+:102A4000950300148D07001C3042FFFF3063FFFF29
60325+:102A50008D060018004310230002110000E2382107
60326+:102A600000E2102B00C4302100C23021AD07001C51
60327+:102A7000AD06001895020002A5020014A50000167C
60328+:102A80008D020008AD0200108D020008AD02000C9E
60329+:102A900095020002A5020020A50000228D02000878
60330+:102AA000AD0200249102003C304200401040001A68
60331+:102AB000262200013C108008A3A90038A38000183A
60332+:102AC000361001008E0200E08D03003427A4004080
60333+:102AD00027A50038004310210E0007C3AE0200E016
60334+:102AE00093A200383C038000A20200D58C620278D9
60335+:102AF0000440FFFE8F82001CAC62024024020002F0
60336+:102B0000A06202443C021000AC6202780E00093957
60337+:102B100000000000262200013043007F14730004EF
60338+:102B2000004020212403FF8002231024004320269C
60339+:102B300093A200180A000A4B309100FF93A40018DA
60340+:102B40008FA3001C2402FFFF1062000A308900FFDF
60341+:102B500024820001248300013042007F14530005C9
60342+:102B6000306900FF2403FF800083102400431026F7
60343+:102B7000304900FF3C028008904200080120882173
60344+:102B8000305000FF123000193222007F000211C0C5
60345+:102B900002421021244202402403FF8000431824F3
60346+:102BA0003C048000AC8300943042007F3C038006EC
60347+:102BB000004310218C43000C004020211060000BCA
60348+:102BC000AFA200400E00057E000000002623000199
60349+:102BD0002405FF803062007F145300020225202468
60350+:102BE000008518260A000AAF307100FF3C048008F7
60351+:102BF000348400808C8300183C027FFF3442FFFF46
60352+:102C000000621824AC8300183C0380008C6201F839
60353+:102C10000440FFFE00000000AC7201C0240200026C
60354+:102C2000A06201C43C021000AC6201F80A000B0E65
60355+:102C30008FBF00583C04800890C300019082000BB5
60356+:102C40001443002F8FBF0058349000809202000878
60357+:102C500030420040104000200000000092020008B6
60358+:102C60000002160000021603044100050240202164
60359+:102C70000E000ECC240500930A000B0E8FBF0058E7
60360+:102C80009202000924030018304200FF1443000D93
60361+:102C900002402021240500390E000E64000030217E
60362+:102CA0000E0003328F84001C8F82FF9424030012D5
60363+:102CB000A04300090E00033D8F84001C0A000B0E88
60364+:102CC0008FBF0058240500360E000E64000030212E
60365+:102CD0000A000B0E8FBF00580E0003320240202165
60366+:102CE000920200058F84001C344200200E00033D38
60367+:102CF000A20200050E0010758F84001C8FBF0058C3
60368+:102D00008FB300548FB200508FB1004C8FB0004889
60369+:102D100003E0000827BD00603C0280083445010044
60370+:102D20003C0280008C42014094A3000E0000302140
60371+:102D300000402021AF82001C3063FFFF3402FFFF00
60372+:102D4000106200063C0760202402FFFFA4A2000ED0
60373+:102D500094A500EA0A00090130A5FFFF03E000087E
60374+:102D60000000000027BDFFC83C0280003C06800830
60375+:102D7000AFB5002CAFB1001CAFBF0030AFB400281E
60376+:102D8000AFB30024AFB20020AFB00018345101003F
60377+:102D900034C501008C4301008E2200148CA400E491
60378+:102DA0000000A821AF83001C0044102318400052EB
60379+:102DB000A38000188E22001400005021ACA200E471
60380+:102DC00090C3000890A200D53073007FA3A200102A
60381+:102DD0008CB200E08CB400E4304200FF1053003BA2
60382+:102DE00093A200108F83001C2407FF80000211C0F3
60383+:102DF0000062102124420240246300400047102456
60384+:102E00003063007F3C0980003C08800A006818217C
60385+:102E1000AD2200248C62003427A4001427A50010E2
60386+:102E2000024280210290102304400028AFA3001426
60387+:102E30009062003C00E21024304200FF1440001970
60388+:102E4000020090219062003C34420040A062003CAD
60389+:102E50008F86001C93A3001024C200403042007FE4
60390+:102E6000004828213C0208008C4200F42463000141
60391+:102E7000306400FF14820002A3A30010A3A000107E
60392+:102E800093A20010AFA50014000211C0244202401A
60393+:102E900000C2102100471024AD2200240A000B4577
60394+:102EA00093A200100E0007C3000000003C0280083F
60395+:102EB00034420100AC5000E093A30010240A00014A
60396+:102EC000A04300D50A000B4593A200102402000184
60397+:102ED000154200093C0380008C6202780440FFFE2A
60398+:102EE0008F82001CAC62024024020002A0620244F5
60399+:102EF0003C021000AC6202789222000B2403000214
60400+:102F0000304200FF144300720000000096220008C7
60401+:102F1000304300FF24020082146200402402008437
60402+:102F20003C028000344901008D22000C95230006EC
60403+:102F3000000216023063FFFF3045003F24020027E5
60404+:102F400010A2000FAF83001428A200281040000830
60405+:102F5000240200312402002110A2000924020025CD
60406+:102F600010A20007938200190A000BBD00000000A8
60407+:102F700010A20007938200190A000BBD0000000098
60408+:102F80000E000777012020210A000C3D0000000000
60409+:102F90003C0380008C6202780440FFFE8F82001C9C
60410+:102FA000AC62024024020002A06202443C02100013
60411+:102FB000AC6202780A000C3D000000009523000678
60412+:102FC000912400058D25000C8D2600108D270018FA
60413+:102FD0008D28001C8D290020244200013C0108009E
60414+:102FE000A42356C63C010800A02456C53C01080095
60415+:102FF000AC2556CC3C010800AC2656D03C0108005C
60416+:10300000AC2756D83C010800AC2856DC3C0108002F
60417+:10301000AC2956E00A000C3DA38200191462000A94
60418+:10302000240200813C02800834420100944500EAF9
60419+:10303000922600058F84001C30A5FFFF30C600FFDC
60420+:103040000A000BFE3C0760211462005C00000000D7
60421+:103050009222000A304300FF306200201040000737
60422+:10306000306200403C02800834420100944500EA8E
60423+:103070008F84001C0A000BFC24060040104000074F
60424+:10308000000316003C02800834420100944500EA27
60425+:103090008F84001C0A000BFC24060041000216036A
60426+:1030A000044100463C02800834420100944500EA95
60427+:1030B0008F84001C2406004230A5FFFF3C076019E6
60428+:1030C0000E000901000000000A000C3D0000000095
60429+:1030D0009222000B24040016304200FF1044000628
60430+:1030E0003C0680009222000B24030017304200FFB0
60431+:1030F000144300320000000034C5010090A2000B10
60432+:10310000304200FF1444000B000080218CA20020FC
60433+:103110008CA400202403FF800043102400021140EF
60434+:103120003084007F004410253C032000004310251C
60435+:10313000ACC2083094A2000800021400000214037C
60436+:10314000044200012410000194A2000830420080D3
60437+:103150005040001A0200A82194A20008304220002A
60438+:10316000504000160200A8218CA300183C021C2D20
60439+:10317000344219ED106200110200A8213C0208003F
60440+:103180008C4200D4104000053C0280082403000457
60441+:1031900034420100A04300FC3C028008344201009C
60442+:1031A000944500EA8F84001C2406000630A5FFFF2A
60443+:1031B0000E0009013C0760210200A8210E00093918
60444+:1031C000000000009222000A304200081040000473
60445+:1031D00002A010210E0013790000000002A01021AF
60446+:1031E0008FBF00308FB5002C8FB400288FB3002420
60447+:1031F0008FB200208FB1001C8FB0001803E00008D0
60448+:1032000027BD00382402FF80008220243C02900069
60449+:1032100034420007008220253C028000AC4400209C
60450+:103220003C0380008C6200200440FFFE0000000090
60451+:1032300003E00008000000003C0380002402FF803F
60452+:10324000008220243462000700822025AC64002024
60453+:103250008C6200200440FFFE0000000003E0000834
60454+:103260000000000027BDFFD8AFB3001CAFB10014B1
60455+:10327000AFB00010AFBF0020AFB200183C1180000B
60456+:103280003C0280088E32002034530100AE2400201E
60457+:10329000966300EA000514003C074000004738250B
60458+:1032A00000A08021000030210E0009013065FFFFE1
60459+:1032B000240200A1160200022402FFFFA2620009FC
60460+:1032C000AE3200208FBF00208FB3001C8FB20018D9
60461+:1032D0008FB100148FB0001003E0000827BD002854
60462+:1032E0003C0280082403000527BDFFE834420100AA
60463+:1032F000A04300FCAFBF00103C0280008C420100E4
60464+:10330000240500A1004020210E000C67AF82001CA4
60465+:103310003C0380008C6202780440FFFE8F82001C18
60466+:103320008FBF001027BD0018AC62024024020002CB
60467+:10333000A06202443C021000AC62027803E0000884
60468+:103340000000000027BDFFE83C068000AFBF001072
60469+:1033500034C7010094E20008304400FF3883008243
60470+:10336000388200842C6300012C4200010062182581
60471+:103370001060002D24020083938200195040003B0E
60472+:103380008FBF00103C020800904256CC8CC4010054
60473+:103390003C06080094C656C63045003F38A30032AC
60474+:1033A00038A2003F2C6300012C4200010062182566
60475+:1033B000AF84001CAF860014A380001914600007BE
60476+:1033C00000E020212402002014A2001200000000CE
60477+:1033D0003402FFFF14C2000F00000000240200208E
60478+:1033E00014A2000500E028218CE300142402FFFF52
60479+:1033F0005062000B8FBF00103C040800248456C0AC
60480+:10340000000030210E000706240700010A000CD638
60481+:103410008FBF00100E000777000000008FBF001064
60482+:103420000A00093927BD001814820004240200850F
60483+:103430008CC501040A000CE1000020211482000662
60484+:103440002482FF808CC50104240440008FBF00103B
60485+:103450000A00016727BD0018304200FF2C4200021D
60486+:1034600010400004240200228FBF00100A000B2726
60487+:1034700027BD0018148200048F8200248FBF001023
60488+:103480000A000C8627BD00188C42000C1040001E5C
60489+:1034900000E0282190E300092402001814620003D0
60490+:1034A000240200160A000CFC240300081462000722
60491+:1034B00024020017240300123C02800834420080DA
60492+:1034C000A04300090A000D0994A7000854620007F0
60493+:1034D00094A700088F82FF942404FFFE9043000508
60494+:1034E00000641824A043000594A7000890A6001BC0
60495+:1034F0008CA4000094A500068FBF001000073C00BC
60496+:103500000A0008DC27BD00188FBF001003E0000888
60497+:1035100027BD00188F8500243C04800094A2002A57
60498+:103520008CA30034000230C02402FFF000C210243B
60499+:1035300000621821AC83003C8CA200303C03800068
60500+:10354000AC8200383C02005034420010AC620030C3
60501+:103550000000000000000000000000008C6200007D
60502+:10356000304200201040FFFD30C20008104000062D
60503+:103570003C0280008C620408ACA200208C62040C27
60504+:103580000A000D34ACA200248C430400ACA300203C
60505+:103590008C420404ACA200243C0300203C028000C6
60506+:1035A000AC4300303C0480008C8200300043102487
60507+:1035B0001440FFFD8F8600243C020040AC820030A6
60508+:1035C00094C3002A94C2002894C4002C94C5002EF1
60509+:1035D00024630001004410213064FFFFA4C20028CE
60510+:1035E00014850002A4C3002AA4C0002A03E0000836
60511+:1035F000000000008F84002427BDFFE83C05800404
60512+:1036000024840010AFBF00100E000E472406000AED
60513+:103610008F840024948200129483002E3042000F85
60514+:10362000244200030043180424027FFF0043102BB0
60515+:1036300010400002AC8300000000000D0E000D13CE
60516+:10364000000000008F8300248FBF001027BD0018EA
60517+:10365000946200149463001A3042000F00021500B7
60518+:10366000006218253C02800003E00008AC4300A083
60519+:103670008F8300243C028004944400069462001A64
60520+:103680008C650000A4640016004410233042FFFF44
60521+:103690000045102B03E00008384200018F8400240D
60522+:1036A0003C0780049486001A8C85000094E2000692
60523+:1036B000A482001694E3000600C310233042FFFFEB
60524+:1036C0000045102B384200011440FFF8A483001677
60525+:1036D00003E00008000000008F8400243C02800406
60526+:1036E000944200069483001A8C850000A482001680
60527+:1036F000006210233042FFFF0045102B38420001CA
60528+:103700005040000D8F850024006030213C0780046C
60529+:1037100094E20006A482001694E3000600C310237E
60530+:103720003042FFFF0045102B384200011440FFF8E3
60531+:10373000A48300168F8500243C03800034620400BB
60532+:103740008CA40020AF820020AC6400388CA200243E
60533+:10375000AC62003C3C020005AC62003003E00008B3
60534+:10376000ACA000048F8400243C0300068C8200047B
60535+:1037700000021140004310253C038000AC62003081
60536+:103780000000000000000000000000008C6200004B
60537+:10379000304200101040FFFD34620400AC80000491
60538+:1037A00003E00008AF8200208F86002427BDFFE0E1
60539+:1037B000AFB10014AFB00010AFBF00188CC300044D
60540+:1037C0008CC500248F820020309000FF94C4001A22
60541+:1037D00024630001244200202484000124A7002047
60542+:1037E000ACC30004AF820020A4C4001AACC70024FC
60543+:1037F00004A100060000882104E2000594C2001A1A
60544+:103800008CC2002024420001ACC2002094C2001AE5
60545+:1038100094C300282E040001004310262C4200010E
60546+:10382000004410245040000594C2001A24020001F4
60547+:10383000ACC2000894C2001A94C300280010202BC8
60548+:10384000004310262C4200010044102514400007BC
60549+:10385000000000008CC20008144000042402001084
60550+:103860008CC300041462000F8F8500240E000DA786
60551+:10387000241100018F820024944300289442001AEE
60552+:1038800014430003000000000E000D1300000000B0
60553+:10389000160000048F8500240E000D840000000037
60554+:1038A0008F85002494A2001E94A4001C24420001D1
60555+:1038B0003043FFFF14640002A4A2001EA4A0001E57
60556+:1038C0001200000A3C02800494A2001494A3001A7F
60557+:1038D0003042000F00021500006218253C028000F3
60558+:1038E000AC4300A00A000E1EACA0000894420006E3
60559+:1038F00094A3001A8CA40000A4A200160062102356
60560+:103900003042FFFF0044102B384200011040000DF0
60561+:1039100002201021006030213C07800494E2000660
60562+:10392000A4A2001694E3000600C310233042FFFF58
60563+:103930000044102B384200011440FFF8A4A30016E5
60564+:10394000022010218FBF00188FB100148FB000101B
60565+:1039500003E0000827BD002003E00008000000008D
60566+:103960008F82002C3C03000600021140004310250A
60567+:103970003C038000AC62003000000000000000004A
60568+:10398000000000008C620000304200101040FFFD7B
60569+:1039900034620400AF82002803E00008AF80002CEE
60570+:1039A00003E000080000102103E000080000000010
60571+:1039B0003084FFFF30A5FFFF0000182110800007B2
60572+:1039C000000000003082000110400002000420428C
60573+:1039D000006518210A000E3D0005284003E000089C
60574+:1039E0000060102110C0000624C6FFFF8CA200005A
60575+:1039F00024A50004AC8200000A000E4724840004C1
60576+:103A000003E000080000000010A0000824A3FFFF4E
60577+:103A1000AC86000000000000000000002402FFFF50
60578+:103A20002463FFFF1462FFFA2484000403E000080B
60579+:103A3000000000003C0280083442008024030001A2
60580+:103A4000AC43000CA4430010A4430012A443001490
60581+:103A500003E00008A44300168F82002427BDFFD88E
60582+:103A6000AFB3001CAFB20018AFB10014AFB000107C
60583+:103A7000AFBF00208C47000C248200802409FF8007
60584+:103A80003C08800E3043007F008080213C0A80008B
60585+:103A9000004920240068182130B100FF30D200FF17
60586+:103AA00010E000290000982126020100AD44002CFE
60587+:103AB000004928243042007F004820219062000005
60588+:103AC00024030050304200FF1443000400000000B3
60589+:103AD000AD45002C948200EA3053FFFF0E000D84A8
60590+:103AE000000000008F8200248F83002000112C0032
60591+:103AF0009442001E001224003484000100A22825F4
60592+:103B00003C02400000A22825AC7000008FBF0020BE
60593+:103B1000AC6000048FB20018AC7300088FB10014C1
60594+:103B2000AC60000C8FB3001CAC6400108FB00010B0
60595+:103B3000AC60001424040001AC60001827BD00280C
60596+:103B40000A000DB8AC65001C8FBF00208FB3001CAD
60597+:103B50008FB200188FB100148FB0001003E000087E
60598+:103B600027BD00283C06800034C201009043000FAE
60599+:103B7000240200101062000E2865001110A000073A
60600+:103B800024020012240200082405003A10620006F4
60601+:103B90000000302103E0000800000000240500358B
60602+:103BA0001462FFFC000030210A000E6400000000D7
60603+:103BB0008CC200748F83FF9424420FA003E000089E
60604+:103BC000AC62000C27BDFFE8AFBF00100E0003423F
60605+:103BD000240500013C0480088FBF0010240200016E
60606+:103BE00034830080A462001227BD00182402000163
60607+:103BF00003E00008A080001A27BDFFE0AFB2001864
60608+:103C0000AFB10014AFB00010AFBF001C30B2FFFF67
60609+:103C10000E000332008088213C028008345000806E
60610+:103C20009202000924030004304200FF1443000CF8
60611+:103C30003C028008124000082402000A0E000E5BBD
60612+:103C400000000000920200052403FFFE0043102440
60613+:103C5000A202000524020012A20200093C02800810
60614+:103C600034420080022020210E00033DA0400027A6
60615+:103C700016400003022020210E000EBF00000000AD
60616+:103C800002202021324600FF8FBF001C8FB2001897
60617+:103C90008FB100148FB00010240500380A000E64A4
60618+:103CA00027BD002027BDFFE0AFBF001CAFB200184A
60619+:103CB000AFB10014AFB000100E00033200808021BD
60620+:103CC0000E000E5B000000003C02800834450080BE
60621+:103CD00090A2000924120018305100FF1232000394
60622+:103CE0000200202124020012A0A2000990A20005D7
60623+:103CF0002403FFFE004310240E00033DA0A2000594
60624+:103D00000200202124050020163200070000302187
60625+:103D10008FBF001C8FB200188FB100148FB000103D
60626+:103D20000A00034227BD00208FBF001C8FB200187D
60627+:103D30008FB100148FB00010240500390A000E6402
60628+:103D400027BD002027BDFFE83C028000AFB0001077
60629+:103D5000AFBF0014344201009442000C2405003629
60630+:103D60000080802114400012304600FF0E00033214
60631+:103D7000000000003C02800834420080240300124E
60632+:103D8000A043000990430005346300100E000E5B51
60633+:103D9000A04300050E00033D020020210200202167
60634+:103DA0000E000342240500200A000F3C0000000022
60635+:103DB0000E000E64000000000E00033202002021FD
60636+:103DC0003C0280089043001B2405FF9F0200202135
60637+:103DD000006518248FBF00148FB00010A043001B93
60638+:103DE0000A00033D27BD001827BDFFE0AFBF001844
60639+:103DF000AFB10014AFB0001030B100FF0E000332BD
60640+:103E0000008080213C02800824030012344200809C
60641+:103E10000E000E5BA04300090E00033D02002021AE
60642+:103E200002002021022030218FBF00188FB1001422
60643+:103E30008FB00010240500350A000E6427BD002055
60644+:103E40003C0480089083000E9082000A1443000B0B
60645+:103E5000000028218F82FF942403005024050001D4
60646+:103E600090420000304200FF1443000400000000B4
60647+:103E70009082000E24420001A082000E03E00008A0
60648+:103E800000A010213C0380008C6201F80440FFFE7A
60649+:103E900024020002AC6401C0A06201C43C02100014
60650+:103EA00003E00008AC6201F827BDFFE0AFB20018E4
60651+:103EB0003C128008AFB10014AFBF001CAFB00010BF
60652+:103EC00036510080922200092403000A304200FF8C
60653+:103ED0001443003E000000008E4300048E22003890
60654+:103EE000506200808FBF001C92220000240300500B
60655+:103EF000304200FF144300253C0280008C42014008
60656+:103F00008E4300043642010002202821AC43001CED
60657+:103F10009622005C8E2300383042FFFF00021040E2
60658+:103F200000621821AE23001C8E4300048E2400384A
60659+:103F30009622005C006418233042FFFF0003184300
60660+:103F4000000210400043102A10400006000000004C
60661+:103F50008E4200048E230038004310230A000FAA6B
60662+:103F6000000220439622005C3042FFFF0002204006
60663+:103F70003C0280083443010034420080ACA4002C91
60664+:103F8000A040002424020001A062000C0E000F5E7D
60665+:103F900000000000104000538FBF001C3C02800056
60666+:103FA0008C4401403C0380008C6201F80440FFFE19
60667+:103FB00024020002AC6401C0A06201C43C021000F3
60668+:103FC000AC6201F80A0010078FBF001C92220009A2
60669+:103FD00024030010304200FF144300043C02800020
60670+:103FE0008C4401400A000FEE0000282192220009B3
60671+:103FF00024030016304200FF14430006240200147C
60672+:10400000A22200093C0280008C4401400A001001F9
60673+:104010008FBF001C8E2200388E23003C00431023EB
60674+:10402000044100308FBF001C92220027244200016F
60675+:10403000A2220027922200272C42000414400016DE
60676+:104040003C1080009222000924030004304200FF4B
60677+:10405000144300093C0280008C4401408FBF001CC7
60678+:104060008FB200188FB100148FB000102405009398
60679+:104070000A000ECC27BD00208C440140240500938B
60680+:104080008FBF001C8FB200188FB100148FB00010CA
60681+:104090000A000F4827BD00208E0401400E000332A5
60682+:1040A000000000008E4200042442FFFFAE420004E4
60683+:1040B0008E22003C2442FFFFAE22003C0E00033D56
60684+:1040C0008E0401408E0401408FBF001C8FB2001887
60685+:1040D0008FB100148FB00010240500040A000342C1
60686+:1040E00027BD00208FB200188FB100148FB00010D0
60687+:1040F00003E0000827BD00203C0680008CC2018838
60688+:104100003C038008346500809063000E00021402B6
60689+:10411000304400FF306300FF1464000E3C0280084E
60690+:1041200090A20026304200FF104400098F82FF94C5
60691+:10413000A0A400262403005090420000304200FF5B
60692+:1041400014430006000000000A0005A18CC4018091
60693+:104150003C02800834420080A044002603E00008AE
60694+:104160000000000027BDFFE030E700FFAFB20018FD
60695+:10417000AFBF001CAFB10014AFB0001000809021A1
60696+:1041800014E0000630C600FF000000000000000D33
60697+:10419000000000000A001060240001163C038008A3
60698+:1041A0009062000E304200FF14460023346200800B
60699+:1041B00090420026304200FF1446001F000000001D
60700+:1041C0009062000F304200FF1446001B0000000008
60701+:1041D0009062000A304200FF144600038F90FF9463
60702+:1041E0000000000D8F90FF948F82FF983C1180009B
60703+:1041F000AE05003CAC450000A066000A0E0003328C
60704+:104200008E240100A20000240E00033D8E24010034
60705+:104210003C0380008C6201F80440FFFE240200028F
60706+:10422000AC7201C0A06201C43C021000AC6201F893
60707+:104230000A0010618FBF001C000000000000000D8C
60708+:10424000000000002400013F8FBF001C8FB2001847
60709+:104250008FB100148FB0001003E0000827BD0020CC
60710+:104260008F83FF943C0280008C44010034420100A3
60711+:104270008C65003C9046001B0A00102724070001B3
60712+:104280003C0280089043000E9042000A0043102632
60713+:10429000304200FF03E000080002102B27BDFFE0C2
60714+:1042A0003C028008AFB10014AFB00010AFBF0018DF
60715+:1042B0003450008092020005240300303042003068
60716+:1042C00014430085008088218F8200248C42000CDA
60717+:1042D000104000828FBF00180E000D840000000007
60718+:1042E0008F860020ACD100009202000892030009E2
60719+:1042F000304200FF00021200306300FF004310252F
60720+:10430000ACC200049202004D000216000002160327
60721+:1043100004410005000000003C0308008C630048D5
60722+:104320000A00109F3C1080089202000830420040B2
60723+:10433000144000030000182192020027304300FFC0
60724+:104340003C108008361100809222004D00031E00B0
60725+:10435000304200FF0002140000621825ACC30008C0
60726+:104360008E2400308F820024ACC4000C8E250034D3
60727+:104370009443001E3C02C00BACC50010006218251F
60728+:104380008E22003800002021ACC200148E22003C96
60729+:10439000ACC200180E000DB8ACC3001C8E020004A5
60730+:1043A0008F8400203C058000AC8200008E2200201B
60731+:1043B000AC8200048E22001CAC8200088E220058C1
60732+:1043C0008CA3007400431021AC82000C8E22002CC0
60733+:1043D000AC8200108E2200408E23004400021400A4
60734+:1043E00000431025AC8200149222004D240300806B
60735+:1043F000304200FF1443000400000000AC800018AD
60736+:104400000A0010E38F8200248E23000C2402000196
60737+:104410001062000E2402FFFF92220008304200408A
60738+:104420001440000A2402FFFF8E23000C8CA20074AB
60739+:10443000006218233C0208000062102414400002AD
60740+:10444000000028210060282100051043AC820018DC
60741+:104450008F820024000020219443001E3C02C00CE7
60742+:10446000006218258F8200200E000DB8AC43001C9E
60743+:104470003C038008346201008C4200008F850020DC
60744+:10448000346300808FBF0018ACA20000ACA0000411
60745+:104490008C6400488F8200248FB10014ACA4000803
60746+:1044A000ACA0000CACA00010906300059446001E68
60747+:1044B0003C02400D00031E0000C23025ACA30014D6
60748+:1044C0008FB00010ACA0001824040001ACA6001CA2
60749+:1044D0000A000DB827BD00208FBF00188FB100144F
60750+:1044E0008FB0001003E0000827BD00203C028000D0
60751+:1044F0009443007C3C02800834460100308400FF75
60752+:104500003065FFFF2402000524A34650A0C4000C20
60753+:104510005482000C3065FFFF90C2000D2C42000752
60754+:104520001040000724A30A0090C3000D24020014C9
60755+:104530000062100400A210210A00111F3045FFFF85
60756+:104540003065FFFF3C0280083442008003E0000831
60757+:10455000A44500143C03800834680080AD05003891
60758+:10456000346701008CE2001C308400FF00A210239D
60759+:104570001840000330C600FF24A2FFFCACE2001C80
60760+:1045800030820001504000083C0380088D02003C4E
60761+:1045900000A2102304410012240400058C620004D0
60762+:1045A00010A2000F3C0380088C62000414A2001EBD
60763+:1045B000000000003C0208008C4200D8304200207D
60764+:1045C000104000093C0280083462008090630008BB
60765+:1045D0009042004C144300043C0280082404000470
60766+:1045E0000A00110900000000344300803442010039
60767+:1045F000A040000C24020001A462001410C0000AB4
60768+:104600003C0280008C4401003C0380008C6201F875
60769+:104610000440FFFE24020002AC6401C0A06201C499
60770+:104620003C021000AC6201F803E00008000000004A
60771+:1046300027BDFFE800A61823AFBF00101860008058
60772+:10464000308800FF3C02800834470080A0E000244E
60773+:1046500034440100A0E000278C82001C00A210233B
60774+:1046600004400056000000008CE2003C94E3005C33
60775+:104670008CE4002C004530233063FFFF00C3182179
60776+:104680000083202B1080000400E018218CE2002C15
60777+:104690000A00117800A2102194E2005C3042FFFF72
60778+:1046A00000C2102100A21021AC62001C3C02800854
60779+:1046B000344400809482005C8C83001C3042FFFFF5
60780+:1046C0000002104000A210210043102B10400004F3
60781+:1046D000000000008C82001C0A00118B3C06800840
60782+:1046E0009482005C3042FFFF0002104000A21021C3
60783+:1046F0003C06800834C3010034C70080AC82001C33
60784+:10470000A060000CACE500388C62001C00A21023F5
60785+:104710001840000224A2FFFCAC62001C3102000120
60786+:10472000104000083C0380088CE2003C00A21023EB
60787+:1047300004410012240400058CC2000410A20010E1
60788+:104740008FBF00108C62000414A2004F8FBF0010B6
60789+:104750003C0208008C4200D8304200201040000A81
60790+:104760003C02800834620080906300089042004C54
60791+:10477000144300053C028008240400048FBF00108D
60792+:104780000A00110927BD001834430080344201009B
60793+:10479000A040000C24020001A46200143C0280002E
60794+:1047A0008C4401003C0380008C6201F80440FFFE51
60795+:1047B000240200020A0011D8000000008CE2001C54
60796+:1047C000004610230043102B54400001ACE5001CB0
60797+:1047D00094E2005C3042FFFF0062102B144000079F
60798+:1047E0002402000294E2005C8CE3001C3042FFFFD4
60799+:1047F00000621821ACE3001C24020002ACE5003882
60800+:104800000E000F5EA082000C1040001F8FBF001032
60801+:104810003C0280008C4401003C0380008C6201F863
60802+:104820000440FFFE24020002AC6401C0A06201C487
60803+:104830003C021000AC6201F80A0011F08FBF0010BA
60804+:1048400031020010104000108FBF00103C028008A1
60805+:10485000344500808CA3001C94A2005C00661823E1
60806+:104860003042FFFF006218213C023FFF3444FFFF4B
60807+:104870000083102B544000010080182100C3102138
60808+:10488000ACA2001C8FBF001003E0000827BD001879
60809+:1048900027BDFFE800C0402100A63023AFBF0010B5
60810+:1048A00018C00026308A00FF3C028008344900808E
60811+:1048B0008D24001C8D23002C008820230064182BDD
60812+:1048C0001060000F344701008CE2002000461021E8
60813+:1048D000ACE200208CE200200044102B1440000BBE
60814+:1048E0003C023FFF8CE2002000441023ACE2002099
60815+:1048F0009522005C3042FFFF0A0012100082202146
60816+:10490000ACE00020008620213C023FFF3443FFFF43
60817+:104910000064102B54400001006020213C028008FC
60818+:104920003442008000851821AC43001CA0400024C4
60819+:10493000A04000270A0012623C03800831420010A8
60820+:10494000104000433C0380083C06800834C40080CB
60821+:104950008C82003C004810235840003E34660080A2
60822+:104960009082002424420001A0820024908200242E
60823+:104970003C0308008C630024304200FF0043102BEE
60824+:10498000144000688FBF001034C201008C42001C2C
60825+:1049900000A2102318400063000000008CC3000434
60826+:1049A0009482005C006818233042FFFF0003184324
60827+:1049B000000210400043102A1040000500000000D3
60828+:1049C0008CC20004004810230A0012450002104364
60829+:1049D0009482005C3042FFFF000210403C068008D9
60830+:1049E000AC82002C34C5008094A2005C8CA4002C06
60831+:1049F00094A3005C3042FFFF00021040008220219F
60832+:104A00003063FFFF0083202101041021ACA2001CB1
60833+:104A10008CC2000434C60100ACC2001C2402000297
60834+:104A20000E000F5EA0C2000C1040003E8FBF0010B1
60835+:104A30003C0280008C4401003C0380008C6201F841
60836+:104A40000440FFFE240200020A001292000000004F
60837+:104A500034660080ACC50038346401008C82001CD0
60838+:104A600000A210231840000224A2FFFCAC82001C0C
60839+:104A7000314200015040000A3C0380088CC2003CD7
60840+:104A800000A2102304430014240400058C620004D7
60841+:104A900014A200033C0380080A00128424040005C9
60842+:104AA0008C62000414A2001F8FBF00103C0208009B
60843+:104AB0008C4200D8304200201040000A3C0280089E
60844+:104AC00034620080906300089042004C144300055B
60845+:104AD0003C028008240400048FBF00100A00110962
60846+:104AE00027BD00183443008034420100A040000C70
60847+:104AF00024020001A46200143C0280008C440100E6
60848+:104B00003C0380008C6201F80440FFFE2402000296
60849+:104B1000AC6401C0A06201C43C021000AC6201F8A8
60850+:104B20008FBF001003E0000827BD001827BDFFE875
60851+:104B30003C0A8008AFBF0010354900808D22003C40
60852+:104B400000C04021308400FF004610231840009D23
60853+:104B500030E700FF354701002402000100A63023A2
60854+:104B6000A0E0000CA0E0000DA522001418C0002455
60855+:104B7000308200108D23001C8D22002C0068182329
60856+:104B80000043102B1040000F000000008CE20020BA
60857+:104B900000461021ACE200208CE200200043102BE4
60858+:104BA0001440000B3C023FFF8CE200200043102326
60859+:104BB000ACE200209522005C3042FFFF0A0012C1E7
60860+:104BC00000621821ACE00020006618213C023FFF83
60861+:104BD0003446FFFF00C3102B5440000100C01821D1
60862+:104BE0003C0280083442008000651821AC43001C60
60863+:104BF000A0400024A04000270A00130F3C038008B7
60864+:104C0000104000403C0380088D22003C00481023E7
60865+:104C10005840003D34670080912200242442000166
60866+:104C2000A1220024912200243C0308008C6300246C
60867+:104C3000304200FF0043102B1440009A8FBF001039
60868+:104C40008CE2001C00A21023184000960000000017
60869+:104C50008D4300049522005C006818233042FFFF5A
60870+:104C600000031843000210400043102A10400005C2
60871+:104C7000012020218D420004004810230A0012F276
60872+:104C8000000210439522005C3042FFFF00021040FA
60873+:104C90003C068008AC82002C34C5008094A2005CE5
60874+:104CA0008CA4002C94A3005C3042FFFF0002104053
60875+:104CB000008220213063FFFF0083182101031021AF
60876+:104CC000ACA2001C8CC2000434C60100ACC2001CA3
60877+:104CD000240200020E000F5EA0C2000C1040007102
60878+:104CE0008FBF00103C0280008C4401003C03800018
60879+:104CF0008C6201F80440FFFE240200020A0013390E
60880+:104D00000000000034670080ACE500383466010024
60881+:104D10008CC2001C00A210231840000224A2FFFC39
60882+:104D2000ACC2001C30820001504000083C038008E7
60883+:104D30008CE2003C00A2102304430051240400052F
60884+:104D40008C62000410A2003E3C0380088C620004C8
60885+:104D500054A200548FBF00103C0208008C4200D8BF
60886+:104D600030420020104000063C028008346200807F
60887+:104D7000906300089042004C104300403C028008C1
60888+:104D80003443008034420100A040000C24020001A2
60889+:104D9000A46200143C0280008C4401003C038000AB
60890+:104DA0008C6201F80440FFFE24020002AC6401C0E2
60891+:104DB000A06201C43C021000AC6201F80A00137743
60892+:104DC0008FBF001024020005A120002714E2000A72
60893+:104DD0003C038008354301009062000D2C42000620
60894+:104DE000504000053C0380089062000D2442000101
60895+:104DF000A062000D3C03800834670080ACE50038F9
60896+:104E0000346601008CC2001C00A21023184000026E
60897+:104E100024A2FFFCACC2001C308200015040000AFA
60898+:104E20003C0380088CE2003C00A2102304410014E3
60899+:104E3000240400058C62000414A200033C038008D3
60900+:104E40000A00136E240400058C62000414A20015ED
60901+:104E50008FBF00103C0208008C4200D83042002076
60902+:104E60001040000A3C028008346200809063000811
60903+:104E70009042004C144300053C02800824040004C6
60904+:104E80008FBF00100A00110927BD001834430080AD
60905+:104E900034420100A040000C24020001A46200146E
60906+:104EA0008FBF001003E0000827BD00183C0B8008EE
60907+:104EB00027BDFFE83C028000AFBF00103442010074
60908+:104EC000356A00809044000A356901008C45001461
60909+:104ED0008D4800389123000C308400FF0105102319
60910+:104EE0001C4000B3306700FF2CE20006504000B1C8
60911+:104EF0008FBF00102402000100E2300430C2000322
60912+:104F00005440000800A8302330C2000C144000A117
60913+:104F100030C20030144000A38FBF00100A00143BC1
60914+:104F20000000000018C00024308200108D43001CD7
60915+:104F30008D42002C006818230043102B1040000FF6
60916+:104F4000000000008D22002000461021AD2200202C
60917+:104F50008D2200200043102B1440000B3C023FFF29
60918+:104F60008D22002000431023AD2200209542005CDA
60919+:104F70003042FFFF0A0013AF00621821AD2000206D
60920+:104F8000006618213C023FFF3446FFFF00C3102B90
60921+:104F90005440000100C018213C02800834420080C7
60922+:104FA00000651821AC43001CA0400024A04000274D
60923+:104FB0000A0013FD3C038008104000403C038008B9
60924+:104FC0008D42003C004810231840003D34670080AB
60925+:104FD0009142002424420001A14200249142002475
60926+:104FE0003C0308008C630024304200FF0043102B78
60927+:104FF000144000708FBF00108D22001C00A21023EF
60928+:105000001840006C000000008D6300049542005CB5
60929+:10501000006818233042FFFF0003184300021040CD
60930+:105020000043102A10400005014020218D62000439
60931+:10503000004810230A0013E0000210439542005C70
60932+:105040003042FFFF000210403C068008AC82002C7A
60933+:1050500034C5008094A2005C8CA4002C94A3005C56
60934+:105060003042FFFF00021040008220213063FFFF2A
60935+:105070000083182101031021ACA2001C8CC2000483
60936+:1050800034C60100ACC2001C240200020E000F5EF8
60937+:10509000A0C2000C104000478FBF00103C028000EF
60938+:1050A0008C4401003C0380008C6201F80440FFFE48
60939+:1050B000240200020A00142D000000003467008062
60940+:1050C000ACE50038346601008CC2001C00A210233D
60941+:1050D0001840000224A2FFFCACC2001C3082000178
60942+:1050E0005040000A3C0380088CE2003C00A21023E0
60943+:1050F00004430014240400058C62000414A200037D
60944+:105100003C0380080A00141F240400058C6200047C
60945+:1051100014A200288FBF00103C0208008C4200D867
60946+:10512000304200201040000A3C02800834620080B7
60947+:10513000906300089042004C144300053C02800834
60948+:10514000240400048FBF00100A00110927BD0018B5
60949+:105150003443008034420100A040000C24020001CE
60950+:10516000A46200143C0280008C4401003C038000D7
60951+:105170008C6201F80440FFFE24020002AC6401C00E
60952+:10518000A06201C43C021000AC6201F80A00143BAA
60953+:105190008FBF00108FBF0010010030210A00115A8C
60954+:1051A00027BD0018010030210A00129927BD001800
60955+:1051B0008FBF001003E0000827BD00183C038008E3
60956+:1051C0003464010024020003A082000C8C620004FD
60957+:1051D00003E00008AC82001C3C05800834A300807A
60958+:1051E0009062002734A501002406004324420001F8
60959+:1051F000A0620027906300273C0208008C42004810
60960+:10520000306300FF146200043C07602194A500EAAB
60961+:105210000A00090130A5FFFF03E0000800000000BC
60962+:1052200027BDFFE8AFBF00103C0280000E00144411
60963+:105230008C4401803C02800834430100A060000CD3
60964+:105240008C4200048FBF001027BD001803E0000847
60965+:10525000AC62001C27BDFFE03C028008AFBF001815
60966+:10526000AFB10014AFB000103445008034460100E7
60967+:105270003C0880008D09014090C3000C8CA4003CC8
60968+:105280008CA200381482003B306700FF9502007C3E
60969+:1052900090A30027146000093045FFFF2402000599
60970+:1052A00054E200083C04800890C2000D2442000132
60971+:1052B000A0C2000D0A00147F3C048008A0C0000DAD
60972+:1052C0003C048008348201009042000C2403000555
60973+:1052D000304200FF1443000A24A205DC348300801E
60974+:1052E000906200272C4200075040000524A20A00CB
60975+:1052F00090630027240200140062100400A2102111
60976+:105300003C108008361000803045FFFF012020212E
60977+:105310000E001444A60500149602005C8E030038AB
60978+:105320003C1180003042FFFF000210400062182153
60979+:10533000AE03001C0E0003328E24014092020025B1
60980+:1053400034420040A20200250E00033D8E2401409D
60981+:105350008E2401403C0380008C6201F80440FFFE73
60982+:1053600024020002AC6401C0A06201C43C0210002F
60983+:10537000AC6201F88FBF00188FB100148FB000101D
60984+:1053800003E0000827BD00203C0360103C02080039
60985+:1053900024420174AC62502C8C6250003C048000AA
60986+:1053A00034420080AC6250003C0208002442547C2D
60987+:1053B0003C010800AC2256003C020800244254384C
60988+:1053C0003C010800AC2256043C020002AC840008F8
60989+:1053D000AC82000C03E000082402000100A0302190
60990+:1053E0003C1C0800279C56083C0200023C050400B7
60991+:1053F00000852826008220260004102B2CA5000101
60992+:105400002C840001000210803C0308002463560035
60993+:105410000085202500431821108000030000102182
60994+:10542000AC6600002402000103E000080000000058
60995+:105430003C1C0800279C56083C0200023C05040066
60996+:1054400000852826008220260004102B2CA50001B0
60997+:105450002C840001000210803C03080024635600E5
60998+:105460000085202500431821108000050000102130
60999+:105470003C02080024425438AC62000024020001BF
61000+:1054800003E00008000000003C0200023C030400AE
61001+:1054900000821026008318262C4200012C63000194
61002+:1054A000004310251040000B000028213C1C080080
61003+:1054B000279C56083C0380008C62000824050001EC
61004+:1054C00000431025AC6200088C62000C00441025DB
61005+:1054D000AC62000C03E0000800A010213C1C080096
61006+:1054E000279C56083C0580008CA3000C0004202754
61007+:1054F000240200010064182403E00008ACA3000C9F
61008+:105500003C020002148200063C0560008CA208D018
61009+:105510002403FFFE0043102403E00008ACA208D0DF
61010+:105520003C02040014820005000000008CA208D098
61011+:105530002403FFFD00431024ACA208D003E00008C0
61012+:10554000000000003C02601A344200108C430080CE
61013+:1055500027BDFFF88C440084AFA3000093A3000094
61014+:10556000240200041462001AAFA4000493A20001F4
61015+:105570001040000797A300023062FFFC3C0380004C
61016+:10558000004310218C4200000A001536AFA200042F
61017+:105590003062FFFC3C03800000431021AC4400005B
61018+:1055A000A3A000003C0560008CA208D02403FFFEED
61019+:1055B0003C04601A00431024ACA208D08FA300045E
61020+:1055C0008FA2000034840010AC830084AC82008081
61021+:1055D00003E0000827BD000827BDFFE8AFBF0010AB
61022+:1055E0003C1C0800279C56083C0280008C43000CA1
61023+:1055F0008C420004004318243C0200021060001496
61024+:10560000006228243C0204003C04000210A00005B3
61025+:10561000006210243C0208008C4256000A00155B10
61026+:1056200000000000104000073C0404003C02080099
61027+:105630008C4256040040F809000000000A00156082
61028+:10564000000000000000000D3C1C0800279C5608CC
61029+:105650008FBF001003E0000827BD0018800802403B
61030+:1056600080080100800800808008000000000C8095
61031+:105670000000320008000E9808000EF408000F88A1
61032+:1056800008001028080010748008010080080080BD
61033+:10569000800800000A000028000000000000000050
61034+:1056A0000000000D6370362E322E316200000000C3
61035+:1056B00006020104000000000000000000000000DD
61036+:1056C000000000000000000038003C000000000066
61037+:1056D00000000000000000000000000000000020AA
61038+:1056E00000000000000000000000000000000000BA
61039+:1056F00000000000000000000000000000000000AA
61040+:10570000000000000000000021003800000000013F
61041+:105710000000002B000000000000000400030D400A
61042+:105720000000000000000000000000000000000079
61043+:105730000000000000000000100000030000000056
61044+:105740000000000D0000000D3C020800244259AC8E
61045+:105750003C03080024635BF4AC4000000043202BB2
61046+:105760001480FFFD244200043C1D080037BD9FFC4F
61047+:1057700003A0F0213C100800261000A03C1C0800EB
61048+:10578000279C59AC0E0002F6000000000000000D3E
61049+:1057900027BDFFB4AFA10000AFA20004AFA3000873
61050+:1057A000AFA4000CAFA50010AFA60014AFA700185F
61051+:1057B000AFA8001CAFA90020AFAA0024AFAB0028FF
61052+:1057C000AFAC002CAFAD0030AFAE0034AFAF00389F
61053+:1057D000AFB8003CAFB90040AFBC0044AFBF004819
61054+:1057E0000E000820000000008FBF00488FBC00445E
61055+:1057F0008FB900408FB8003C8FAF00388FAE0034B7
61056+:105800008FAD00308FAC002C8FAB00288FAA002406
61057+:105810008FA900208FA8001C8FA700188FA6001446
61058+:105820008FA500108FA4000C8FA300088FA2000486
61059+:105830008FA1000027BD004C3C1B60188F7A5030B0
61060+:10584000377B502803400008AF7A000000A01821E1
61061+:1058500000801021008028213C0460003C0760008B
61062+:105860002406000810600006348420788C42000072
61063+:10587000ACE220088C63000003E00008ACE3200CDD
61064+:105880000A000F8100000000240300403C02600079
61065+:1058900003E00008AC4320003C0760008F86000452
61066+:1058A0008CE520740086102100A2182B14600007DC
61067+:1058B000000028218F8AFDA024050001A1440013C7
61068+:1058C0008F89000401244021AF88000403E0000810
61069+:1058D00000A010218F84FDA08F8500049086001306
61070+:1058E00030C300FF00A31023AF82000403E00008D0
61071+:1058F000A08000138F84FDA027BDFFE8AFB000108B
61072+:10590000AFBF001490890011908700112402002875
61073+:10591000312800FF3906002830E300FF2485002CE1
61074+:105920002CD00001106200162484001C0E00006EB2
61075+:10593000000000008F8FFDA03C05600024020204DF
61076+:1059400095EE003E95ED003C000E5C0031ACFFFF93
61077+:10595000016C5025ACAA2010520000012402000462
61078+:10596000ACA22000000000000000000000000000C9
61079+:105970008FBF00148FB0001003E0000827BD00188F
61080+:105980000A0000A6000028218F85FDA027BDFFD8B2
61081+:10599000AFBF0020AFB3001CAFB20018AFB100140E
61082+:1059A000AFB000100080982190A4001124B0001C1A
61083+:1059B00024B1002C308300FF386200280E000090D4
61084+:1059C0002C5200010E00009800000000020020216F
61085+:1059D0001240000202202821000028210E00006E43
61086+:1059E000000000008F8DFDA03C0880003C05600099
61087+:1059F00095AC003E95AB003C02683025000C4C0095
61088+:105A0000316AFFFF012A3825ACA7201024020202C8
61089+:105A1000ACA6201452400001240200028FBF0020D7
61090+:105A20008FB3001C8FB200188FB100148FB000101C
61091+:105A300027BD002803E00008ACA2200027BDFFE03E
61092+:105A4000AFB20018AFB10014AFB00010AFBF001C70
61093+:105A50003C1160008E2320748F82000430D0FFFF41
61094+:105A600030F2FFFF1062000C2406008F0E00006E63
61095+:105A7000000000003C06801F0010440034C5FF00F9
61096+:105A80000112382524040002AE2720100000302126
61097+:105A9000AE252014AE2420008FBF001C8FB200184A
61098+:105AA0008FB100148FB0001000C0102103E0000877
61099+:105AB00027BD002027BDFFE0AFB0001030D0FFFFB2
61100+:105AC000AFBF0018AFB100140E00006E30F1FFFF41
61101+:105AD00000102400009180253C036000AC70201071
61102+:105AE0008FBF00188FB100148FB000102402000483
61103+:105AF000AC62200027BD002003E000080000102158
61104+:105B000027BDFFE03C046018AFBF0018AFB1001420
61105+:105B1000AFB000108C8850002403FF7F34028071E6
61106+:105B20000103382434E5380C241F00313C1980006F
61107+:105B3000AC8550003C11800AAC8253BCAF3F0008DA
61108+:105B40000E00054CAF9100400E00050A3C116000AC
61109+:105B50000E00007D000000008E3008083C0F570941
61110+:105B60002418FFF00218602435EEE00035EDF00057
61111+:105B7000018E5026018D58262D4600012D69000109
61112+:105B8000AF86004C0E000D09AF8900503C06601630
61113+:105B90008CC700003C0860148D0500A03C03FFFF8B
61114+:105BA00000E320243C02535300052FC2108200550D
61115+:105BB00034D07C00960201F2A780006C10400003F4
61116+:105BC000A780007C384B1E1EA78B006C960201F844
61117+:105BD000104000048F8D0050384C1E1EA78C007C96
61118+:105BE0008F8D005011A000058F83004C240E0020E3
61119+:105BF000A78E007CA78E006C8F83004C1060000580
61120+:105C00009785007C240F0020A78F007CA78F006C55
61121+:105C10009785007C2CB8008153000001240500808A
61122+:105C20009784006C2C91040152200001240404008C
61123+:105C30001060000B3C0260008FBF00188FB1001491
61124+:105C40008FB0001027BD0020A784006CA785007CC2
61125+:105C5000A380007EA780007403E00008A780009264
61126+:105C60008C4704382419103C30FFFFFF13F9000360
61127+:105C700030A8FFFF1100004624030050A380007EDF
61128+:105C80009386007E50C00024A785007CA780007CFE
61129+:105C90009798007CA780006CA7800074A780009272
61130+:105CA0003C010800AC3800800E00078700000000AF
61131+:105CB0003C0F60008DED0808240EFFF03C0B600ED9
61132+:105CC000260C0388356A00100000482100002821B6
61133+:105CD00001AE20243C105709AF8C0010AF8A004859
61134+:105CE000AF89001810900023AF8500148FBF0018F3
61135+:105CF0008FB100148FB0001027BD002003E0000812
61136+:105D0000AF80005400055080014648218D260004D4
61137+:105D10000A00014800D180219798007CA784006C7C
61138+:105D2000A7800074A78000923C010800AC38008076
61139+:105D30000E000787000000003C0F60008DED080892
61140+:105D4000240EFFF03C0B600E260C0388356A001011
61141+:105D5000000048210000282101AE20243C105709F2
61142+:105D6000AF8C0010AF8A0048AF8900181490FFDF95
61143+:105D7000AF85001424110001AF9100548FBF0018AB
61144+:105D80008FB100148FB0001003E0000827BD002081
61145+:105D90000A00017BA383007E3083FFFF8F880040D1
61146+:105DA0008F87003C000321403C0580003C020050EE
61147+:105DB000008248253C0660003C0A010034AC040027
61148+:105DC0008CCD08E001AA58241160000500000000F5
61149+:105DD0008CCF08E024E7000101EA7025ACCE08E092
61150+:105DE0008D19001001805821ACB900388D180014AD
61151+:105DF000ACB8003CACA9003000000000000000007E
61152+:105E00000000000000000000000000000000000092
61153+:105E100000000000000000003C0380008C640000D3
61154+:105E2000308200201040FFFD3C0F60008DED08E047
61155+:105E30003C0E010001AE18241460FFE100000000D8
61156+:105E4000AF87003C03E00008AF8B00588F8500400F
61157+:105E5000240BFFF03C06800094A7001A8CA90024B4
61158+:105E600030ECFFFF000C38C000EB5024012A402129
61159+:105E7000ACC8003C8CA400248CC3003C00831023DD
61160+:105E800018400033000000008CAD002025A2000166
61161+:105E90003C0F0050ACC2003835EE00103C068000CC
61162+:105EA000ACCE003000000000000000000000000048
61163+:105EB00000000000000000000000000000000000E2
61164+:105EC000000000003C0480008C9900003338002062
61165+:105ED0001300FFFD30E20008104000173C0980006D
61166+:105EE0008C880408ACA800108C83040CACA30014AC
61167+:105EF0003C1900203C188000AF19003094AE001807
61168+:105F000094AF001C01CF3021A4A6001894AD001A54
61169+:105F100025A70001A4A7001A94AB001A94AC001E98
61170+:105F2000118B00030000000003E0000800000000E7
61171+:105F300003E00008A4A0001A8D2A0400ACAA0010F7
61172+:105F40008D240404ACA400140A0002183C1900209B
61173+:105F50008CA200200A0002003C0F00500A0001EE53
61174+:105F60000000000027BDFFE8AFBF00100E000232A6
61175+:105F7000000000008F8900408FBF00103C038000AC
61176+:105F8000A520000A9528000A9527000427BD0018BF
61177+:105F90003105FFFF30E6000F0006150000A22025A6
61178+:105FA00003E00008AC6400803C0508008CA50020DC
61179+:105FB0008F83000C27BDFFE8AFB00010AFBF001407
61180+:105FC00010A300100000802124040001020430040A
61181+:105FD00000A6202400C3102450440006261000010F
61182+:105FE000001018802787FDA41480000A006718217C
61183+:105FF000261000012E0900025520FFF38F83000CAC
61184+:10600000AF85000C8FBF00148FB0001003E00008B4
61185+:1060100027BD00188C6800003C058000ACA8002457
61186+:106020000E000234261000013C0508008CA500205B
61187+:106030000A0002592E0900022405000100851804F7
61188+:106040003C0408008C84002027BDFFC8AFBF00348B
61189+:1060500000831024AFBE0030AFB7002CAFB60028CD
61190+:10606000AFB50024AFB40020AFB3001CAFB200182E
61191+:10607000AFB1001410400051AFB000108F84004049
61192+:10608000948700069488000A00E8302330D5FFFF8B
61193+:1060900012A0004B8FBF0034948B0018948C000A20
61194+:1060A000016C50233142FFFF02A2482B1520000251
61195+:1060B00002A02021004020212C8F000515E00002C5
61196+:1060C00000809821241300040E0001C102602021E9
61197+:1060D0008F87004002609021AF80004494F4000A52
61198+:1060E000026080211260004E3291FFFF3C1670006A
61199+:1060F0003C1440003C1E20003C1760008F99005863
61200+:106100008F380000031618241074004F0283F82BF8
61201+:1061100017E0003600000000107E00478F86004424
61202+:1061200014C0003A2403000102031023022320219B
61203+:106130003050FFFF1600FFF13091FFFF8F870040C6
61204+:106140003C1100203C108000AE11003094EB000A9E
61205+:106150003C178000024B5021A4EA000A94E9000A8F
61206+:1061600094E800043123FFFF3106000F00062D00E4
61207+:106170000065F025AEFE008094F3000A94F6001846
61208+:1061800012D30036001221408CFF00148CF4001052
61209+:1061900003E468210000C02101A4782B029870213B
61210+:1061A00001CF6021ACED0014ACEC001002B238233A
61211+:1061B00030F5FFFF16A0FFB88F8400408FBF00347A
61212+:1061C0008FBE00308FB7002C8FB600288FB500240B
61213+:1061D0008FB400208FB3001C8FB200188FB1001451
61214+:1061E0008FB0001003E0000827BD00381477FFCC03
61215+:1061F0008F8600440E000EE202002021004018218C
61216+:106200008F86004410C0FFC9020310230270702360
61217+:106210008F87004001C368210A0002E431B2FFFF0A
61218+:106220008F86004414C0FFC93C1100203C10800040
61219+:106230000A0002AEAE1100300E00046602002021FA
61220+:106240000A0002DB00401821020020210E0009395B
61221+:10625000022028210A0002DB004018210E0001EE76
61222+:10626000000000000A0002C702B2382327BDFFC8A1
61223+:10627000AFB7002CAFB60028AFB50024AFB40020F4
61224+:10628000AFB3001CAFB20018AFB10014AFB0001034
61225+:10629000AFBF00300E00011B241300013C047FFF40
61226+:1062A0003C0380083C0220003C010800AC20007048
61227+:1062B0003496FFFF34770080345200033C1512C03F
61228+:1062C000241400013C1080002411FF800E000245C0
61229+:1062D000000000008F8700488F8B00188F89001402
61230+:1062E0008CEA00EC8CE800E8014B302B01092823F4
61231+:1062F00000A6102314400006014B18231440000E82
61232+:106300003C05800002A3602B1180000B0000000000
61233+:106310003C0560008CEE00EC8CED00E88CA4180CC1
61234+:10632000AF8E001804800053AF8D00148F8F0010C3
61235+:10633000ADF400003C0580008CBF00003BF900017B
61236+:10634000333800011700FFE13C0380008C6201003C
61237+:1063500024060C0010460009000000008C680100B3
61238+:106360002D043080548000103C0480008C690100B2
61239+:106370002D2331811060000C3C0480008CAA0100A8
61240+:1063800011460004000020218CA6010024C5FF81D5
61241+:1063900030A400FF8E0B01000E000269AE0B00243A
61242+:1063A0000A00034F3C0480008C8D01002DAC3300AB
61243+:1063B00011800022000000003C0708008CE70098D4
61244+:1063C00024EE00013C010800AC2E00983C04800043
61245+:1063D0008C8201001440000300000000566000148D
61246+:1063E0003C0440008C9F01008C9801000000982123
61247+:1063F00003F1C82400193940330F007F00EF7025E6
61248+:1064000001D26825AC8D08308C8C01008C85010090
61249+:10641000258B0100017130240006514030A3007F1C
61250+:106420000143482501324025AC8808303C04400037
61251+:10643000AE0401380A00030E000000008C99010030
61252+:10644000240F0020AC99002092F80000330300FFD5
61253+:10645000106F000C241F0050547FFFDD3C048000AF
61254+:106460008C8401000E00154E000000000A00034F4E
61255+:106470003C04800000963824ACA7180C0A000327BF
61256+:106480008F8F00108C8501000E0008F72404008017
61257+:106490000A00034F3C04800000A4102B24030001D9
61258+:1064A00010400009000030210005284000A4102BF6
61259+:1064B00004A00003000318405440FFFC00052840DE
61260+:1064C0005060000A0004182B0085382B54E00004AB
61261+:1064D0000003184200C33025008520230003184222
61262+:1064E0001460FFF9000528420004182B03E000089F
61263+:1064F00000C310213084FFFF30C600FF3C0780003E
61264+:106500008CE201B80440FFFE00064C000124302557
61265+:106510003C08200000C820253C031000ACE00180AE
61266+:10652000ACE50184ACE4018803E00008ACE301B809
61267+:106530003C0660008CC5201C2402FFF03083020062
61268+:10654000308601001060000E00A2282434A500014E
61269+:106550003087300010E0000530830C0034A50004C3
61270+:106560003C04600003E00008AC85201C1060FFFDC7
61271+:106570003C04600034A5000803E00008AC85201C42
61272+:1065800054C0FFF334A500020A0003B03087300086
61273+:1065900027BDFFE8AFB00010AFBF00143C0760009C
61274+:1065A000240600021080001100A080218F83005873
61275+:1065B0000E0003A78C6400188F8200580000202171
61276+:1065C000240600018C45000C0E000398000000001A
61277+:1065D0001600000224020003000010218FBF0014E7
61278+:1065E0008FB0001003E0000827BD00188CE8201CC5
61279+:1065F0002409FFF001092824ACE5201C8F870058EE
61280+:106600000A0003CD8CE5000C3C02600E00804021A6
61281+:1066100034460100240900180000000000000000BA
61282+:10662000000000003C0A00503C0380003547020097
61283+:10663000AC68003834640400AC65003CAC670030E2
61284+:106640008C6C0000318B00201160FFFD2407FFFFE0
61285+:106650002403007F8C8D00002463FFFF248400044A
61286+:10666000ACCD00001467FFFB24C60004000000004E
61287+:10667000000000000000000024A402000085282B78
61288+:106680003C0300203C0E80002529FFFF010540212E
61289+:10669000ADC300301520FFE00080282103E0000892
61290+:1066A000000000008F82005827BDFFD8AFB3001C48
61291+:1066B000AFBF0020AFB20018AFB10014AFB00010F0
61292+:1066C00094460002008098218C5200182CC300814F
61293+:1066D0008C4800048C4700088C51000C8C49001039
61294+:1066E000106000078C4A00142CC4000414800013AE
61295+:1066F00030EB000730C5000310A0001000000000C0
61296+:106700002410008B02002021022028210E00039873
61297+:10671000240600031660000224020003000010217A
61298+:106720008FBF00208FB3001C8FB200188FB10014F0
61299+:106730008FB0001003E0000827BD00281560FFF1AE
61300+:106740002410008B3C0C80003C030020241F00011F
61301+:10675000AD830030AF9F0044000000000000000047
61302+:10676000000000002419FFF024D8000F031978243A
61303+:106770003C1000D0AD88003801F0702524CD000316
61304+:106780003C08600EAD87003C35850400AD8E0030BE
61305+:10679000000D38823504003C3C0380008C6B000007
61306+:1067A000316200201040FFFD0000000010E00008F2
61307+:1067B00024E3FFFF2407FFFF8CA800002463FFFFF2
61308+:1067C00024A50004AC8800001467FFFB24840004A7
61309+:1067D0003C05600EACA60038000000000000000080
61310+:1067E000000000008F8600543C0400203C0780001D
61311+:1067F000ACE4003054C000060120202102402021DA
61312+:106800000E0003A7000080210A00041D02002021C1
61313+:106810000E0003DD01402821024020210E0003A7C5
61314+:10682000000080210A00041D0200202127BDFFE096
61315+:10683000AFB200183092FFFFAFB10014AFBF001C21
61316+:10684000AFB000101640000D000088210A0004932C
61317+:106850000220102124050003508500278CE5000C40
61318+:106860000000000D262800013111FFFF24E2002066
61319+:106870000232802B12000019AF8200588F82004430
61320+:10688000144000168F8700583C0670003C0320001F
61321+:106890008CE5000000A62024148300108F84006083
61322+:1068A000000544023C09800000A980241480FFE90F
61323+:1068B000310600FF2CCA000B5140FFEB26280001D7
61324+:1068C000000668803C0E080025CE575801AE6021B6
61325+:1068D0008D8B0000016000080000000002201021E4
61326+:1068E0008FBF001C8FB200188FB100148FB0001042
61327+:1068F00003E0000827BD00200E0003982404008454
61328+:106900001600FFD88F8700580A000474AF8000601B
61329+:10691000020028210E0003BF240400018F870058C5
61330+:106920000A000474AF820060020028210E0003BF39
61331+:10693000000020210A0004A38F8700580E000404E1
61332+:10694000020020218F8700580A000474AF82006083
61333+:1069500030AFFFFF000F19C03C0480008C9001B8DD
61334+:106960000600FFFE3C1920043C181000AC83018097
61335+:10697000AC800184AC990188AC9801B80A00047518
61336+:106980002628000190E2000390E30002000020218D
61337+:106990000002FE0000033A0000FF2825240600083C
61338+:1069A0000E000398000000001600FFDC2402000324
61339+:1069B0008F870058000010210A000474AF82006025
61340+:1069C00090E8000200002021240600090A0004C308
61341+:1069D00000082E0090E4000C240900FF308500FF21
61342+:1069E00010A900150000302190F9000290F8000372
61343+:1069F000308F00FF94EB000400196E000018740043
61344+:106A0000000F62000186202501AE5025014B28258C
61345+:106A10003084FF8B0A0004C32406000A90E30002BE
61346+:106A200090FF0004000020210003360000DF28252D
61347+:106A30000A0004C32406000B0A0004D52406008BB8
61348+:106A4000000449C23127003F000443423C02800059
61349+:106A500000082040240316802CE60020AC43002CC4
61350+:106A600024EAFFE02482000114C0000330A900FFE3
61351+:106A700000801021314700FF000260803C0D800043
61352+:106A8000240A0001018D20213C0B000E00EA28049D
61353+:106A9000008B302111200005000538278CCE000026
61354+:106AA00001C5382503E00008ACC700008CD8000001
61355+:106AB0000307782403E00008ACCF000027BDFFE007
61356+:106AC000AFB10014AFB00010AFBF00183C076000BA
61357+:106AD0008CE408083402F0003C1160003083F000C0
61358+:106AE000240501C03C04800E000030211062000625
61359+:106AF000241000018CEA08083149F0003928E00030
61360+:106B00000008382B000780403C0D0200AE2D081411
61361+:106B1000240C16803C0B80008E2744000E000F8B47
61362+:106B2000AD6C002C120000043C02169124050001FB
61363+:106B3000120500103C023D2C345800E0AE384408E9
61364+:106B40003C1108008E31007C8FBF00183C066000AD
61365+:106B500000118540360F16808FB100148FB00010E1
61366+:106B60003C0E020027BD0020ACCF442003E000080B
61367+:106B7000ACCE08103C0218DA345800E0AE384408B5
61368+:106B80003C1108008E31007C8FBF00183C0660006D
61369+:106B900000118540360F16808FB100148FB00010A1
61370+:106BA0003C0E020027BD0020ACCF442003E00008CB
61371+:106BB000ACCE08100A0004EB240500010A0004EB27
61372+:106BC0000000282124020400A7820024A780001CC2
61373+:106BD000000020213C06080024C65A582405FFFF67
61374+:106BE00024890001000440803124FFFF01061821A0
61375+:106BF0002C87002014E0FFFAAC6500002404040098
61376+:106C0000A7840026A780001E000020213C06080063
61377+:106C100024C65AD82405FFFF248D0001000460809B
61378+:106C200031A4FFFF018658212C8A00201540FFFA6D
61379+:106C3000AD650000A7800028A7800020A780002263
61380+:106C4000000020213C06080024C65B582405FFFFF5
61381+:106C5000249900010004C0803324FFFF030678213B
61382+:106C60002C8E000415C0FFFAADE500003C05600065
61383+:106C70008CA73D002403E08F00E31024344601403C
61384+:106C800003E00008ACA63D002487007F000731C266
61385+:106C900024C5FFFF000518C2246400013082FFFFF5
61386+:106CA000000238C0A78400303C010800AC27003047
61387+:106CB000AF80002C0000282100002021000030219E
61388+:106CC0002489000100A728213124FFFF2CA81701E7
61389+:106CD000110000032C8300801460FFF924C600011A
61390+:106CE00000C02821AF86002C10C0001DA786002AF6
61391+:106CF00024CAFFFF000A11423C08080025085B581F
61392+:106D00001040000A00002021004030212407FFFF2E
61393+:106D1000248E00010004688031C4FFFF01A86021B7
61394+:106D20000086582B1560FFFAAD87000030A2001FC7
61395+:106D30005040000800043080240300010043C804D0
61396+:106D400000041080004878212738FFFF03E0000886
61397+:106D5000ADF8000000C820212405FFFFAC8500002D
61398+:106D600003E000080000000030A5FFFF30C6FFFF71
61399+:106D700030A8001F0080602130E700FF0005294295
61400+:106D80000000502110C0001D24090001240B000147
61401+:106D900025180001010B2004330800FF0126782686
61402+:106DA000390E00202DED00012DC2000101A2182591
61403+:106DB0001060000D014450250005C880032C4021BF
61404+:106DC0000100182110E0000F000A20278D040000A8
61405+:106DD000008A1825AD03000024AD00010000402109
61406+:106DE0000000502131A5FFFF252E000131C9FFFF12
61407+:106DF00000C9102B1040FFE72518000103E0000830
61408+:106E0000000000008D0A0000014440240A0005D162
61409+:106E1000AC68000027BDFFE830A5FFFF30C6FFFFCC
61410+:106E2000AFB00010AFBF001430E7FFFF00005021EB
61411+:106E30003410FFFF0000602124AF001F00C0482174
61412+:106E4000241800012419002005E0001601E010219B
61413+:106E50000002F943019F682A0009702B01AE40240B
61414+:106E600011000017000C18800064102110E00005CC
61415+:106E70008C4B000000F840040008382301675824B8
61416+:106E800000003821154000410000402155600016E7
61417+:106E90003169FFFF258B0001316CFFFF05E1FFEC3D
61418+:106EA00001E0102124A2003E0002F943019F682A5C
61419+:106EB0000009702B01AE40241500FFEB000C188078
61420+:106EC000154600053402FFFF020028210E0005B51B
61421+:106ED00000003821020010218FBF00148FB0001075
61422+:106EE00003E0000827BD00181520000301601821E9
61423+:106EF000000B1C0224080010306A00FF154000053A
61424+:106F0000306E000F250D000800031A0231A800FFA3
61425+:106F1000306E000F15C00005307F000325100004FF
61426+:106F200000031902320800FF307F000317E000055C
61427+:106F3000386900012502000200031882304800FF72
61428+:106F4000386900013123000110600004310300FFA3
61429+:106F5000250A0001314800FF310300FF000C6940A1
61430+:106F600001A34021240A000110CAFFD53110FFFF00
61431+:106F7000246E000131C800FF1119FFC638C9000195
61432+:106F80002D1F002053E0001C258B0001240D000163
61433+:106F90000A000648240E002051460017258B0001E8
61434+:106FA00025090001312800FF2D0900205120001281
61435+:106FB000258B000125430001010D5004014B1024D5
61436+:106FC000250900011440FFF4306AFFFF3127FFFF5D
61437+:106FD00010EE000C2582FFFF304CFFFF0000502117
61438+:106FE0003410FFFF312800FF2D0900205520FFF24B
61439+:106FF00025430001258B0001014648260A000602B0
61440+:10700000316CFFFF00003821000050210A000654B7
61441+:107010003410FFFF27BDFFD8AFB0001030F0FFFFE6
61442+:10702000AFB10014001039423211FFE000071080A8
61443+:10703000AFB3001C00B1282330D3FFFFAFB200185C
61444+:1070400030A5FFFF00809021026030210044202104
61445+:10705000AFBF00200E0005E03207001F022288218A
61446+:107060003403FFFF0240202102002821026030216A
61447+:1070700000003821104300093231FFFF02201021A7
61448+:107080008FBF00208FB3001C8FB200188FB1001487
61449+:107090008FB0001003E0000827BD00280E0005E0B7
61450+:1070A0000000000000408821022010218FBF002036
61451+:1070B0008FB3001C8FB200188FB100148FB0001076
61452+:1070C00003E0000827BD0028000424003C03600002
61453+:1070D000AC603D0810A00002348210063482101605
61454+:1070E00003E00008AC623D0427BDFFE0AFB0001034
61455+:1070F000309000FF2E020006AFBF001810400008BD
61456+:10710000AFB10014001030803C03080024635784A2
61457+:1071100000C328218CA400000080000800000000AB
61458+:10712000000020218FBF00188FB100148FB0001015
61459+:107130000080102103E0000827BD00209791002A5D
61460+:1071400016200051000020213C020800904200332C
61461+:107150000A0006BB00000000978D002615A0003134
61462+:10716000000020210A0006BB2402000897870024A3
61463+:1071700014E0001A00001821006020212402000100
61464+:107180001080FFE98FBF0018000429C2004530219C
61465+:1071900000A6582B1160FFE43C0880003C0720004B
61466+:1071A000000569C001A76025AD0C00203C038008E4
61467+:1071B0002402001F2442FFFFAC6000000441FFFDD9
61468+:1071C0002463000424A5000100A6702B15C0FFF560
61469+:1071D000000569C00A0006A58FBF00189787001C2C
61470+:1071E0003C04080024845A58240504000E0006605C
61471+:1071F00024060001978B002424440001308AFFFFFD
61472+:107200002569FFFF2D48040000402821150000409B
61473+:10721000A789002424AC3800000C19C00A0006B964
61474+:10722000A780001C9787001E3C04080024845AD8BD
61475+:10723000240504000E00066024060001979900262C
61476+:10724000244400013098FFFF272FFFFF2F0E04007A
61477+:107250000040882115C0002CA78F0026A780001EA3
61478+:107260003A020003262401003084FFFF0E00068D41
61479+:107270002C4500010011F8C027F00100001021C0CA
61480+:107280000A0006BB240200089785002E978700227B
61481+:107290003C04080024845B580E00066024060001AC
61482+:1072A0009787002A8F89002C2445000130A8FFFF12
61483+:1072B00024E3FFFF0109302B0040802114C0001897
61484+:1072C000A783002AA7800022978500300E000F7543
61485+:1072D00002002021244A05003144FFFF0E00068DE4
61486+:1072E000240500013C05080094A500320E000F752E
61487+:1072F00002002021244521003C0208009042003376
61488+:107300000A0006BB000521C00A0006F3A784001E80
61489+:1073100024AC3800000C19C00A0006B9A784001C70
61490+:107320000A00070DA7850022308400FF27BDFFE873
61491+:107330002C820006AFBF0014AFB000101040001543
61492+:1073400000A03821000440803C0308002463579CBF
61493+:10735000010328218CA40000008000080000000028
61494+:1073600024CC007F000751C2000C59C23170FFFFCE
61495+:107370002547C40030E5FFFF2784001C02003021B0
61496+:107380000E0005B52407000197860028020620217B
61497+:10739000A78400288FBF00148FB0001003E00008FE
61498+:1073A00027BD00183C0508008CA50030000779C2F5
61499+:1073B0000E00038125E4DF003045FFFF3C04080098
61500+:1073C00024845B58240600010E0005B52407000143
61501+:1073D000978E002A8FBF00148FB0001025CD0001BA
61502+:1073E00027BD001803E00008A78D002A0007C9C2C6
61503+:1073F0002738FF00001878C231F0FFFF3C04080076
61504+:1074000024845AD802002821240600010E0005B564
61505+:1074100024070001978D0026260E0100000E84002F
61506+:1074200025AC00013C0B6000A78C0026AD603D0838
61507+:1074300036040006000030213C0760008CE23D0469
61508+:10744000305F000617E0FFFD24C9000100061B00A5
61509+:10745000312600FF006440252CC50004ACE83D0443
61510+:1074600014A0FFF68FBF00148FB0001003E00008D7
61511+:1074700027BD0018000751C22549C8002406000195
61512+:10748000240700013C04080024845A580E0005B566
61513+:107490003125FFFF978700248FBF00148FB00010A5
61514+:1074A00024E6000127BD001803E00008A786002499
61515+:1074B0003C0660183C090800252900FCACC9502C8A
61516+:1074C0008CC850003C0580003C020002350700805B
61517+:1074D000ACC750003C04080024841FE03C030800B3
61518+:1074E00024631F98ACA50008ACA2000C3C01080066
61519+:1074F000AC2459A43C010800AC2359A803E00008BF
61520+:107500002402000100A030213C1C0800279C59AC3B
61521+:107510003C0C04003C0B0002008B3826008C4026FB
61522+:107520002CE200010007502B2D050001000A4880C5
61523+:107530003C030800246359A4004520250123182199
61524+:107540001080000300001021AC660000240200013E
61525+:1075500003E00008000000003C1C0800279C59AC18
61526+:107560003C0B04003C0A0002008A3026008B3826BF
61527+:107570002CC200010006482B2CE5000100094080C8
61528+:107580003C030800246359A4004520250103182169
61529+:1075900010800005000010213C0C0800258C1F986D
61530+:1075A000AC6C00002402000103E0000800000000B1
61531+:1075B0003C0900023C080400008830260089382677
61532+:1075C0002CC30001008028212CE400010083102539
61533+:1075D0001040000B000030213C1C0800279C59ACD7
61534+:1075E0003C0A80008D4E00082406000101CA68256F
61535+:1075F000AD4D00088D4C000C01855825AD4B000C9D
61536+:1076000003E0000800C010213C1C0800279C59AC76
61537+:107610003C0580008CA6000C0004202724020001F9
61538+:1076200000C4182403E00008ACA3000C3C020002D4
61539+:107630001082000B3C0560003C070400108700032B
61540+:107640000000000003E00008000000008CA908D042
61541+:10765000240AFFFD012A402403E00008ACA808D05A
61542+:107660008CA408D02406FFFE0086182403E000083E
61543+:10767000ACA308D03C05601A34A600108CC300806F
61544+:1076800027BDFFF88CC50084AFA3000093A40000C1
61545+:107690002402001010820003AFA5000403E00008DC
61546+:1076A00027BD000893A7000114E0001497AC000266
61547+:1076B00097B800023C0F8000330EFFFC01CF682119
61548+:1076C000ADA50000A3A000003C0660008CC708D058
61549+:1076D0002408FFFE3C04601A00E82824ACC508D04A
61550+:1076E0008FA300048FA200003499001027BD00086A
61551+:1076F000AF22008003E00008AF2300843C0B800031
61552+:10770000318AFFFC014B48218D2800000A00080C3B
61553+:10771000AFA8000427BDFFE8AFBF00103C1C080065
61554+:10772000279C59AC3C0580008CA4000C8CA2000462
61555+:107730003C0300020044282410A0000A00A31824DF
61556+:107740003C0604003C0400021460000900A610245A
61557+:107750001440000F3C0404000000000D3C1C080015
61558+:10776000279C59AC8FBF001003E0000827BD00180C
61559+:107770003C0208008C4259A40040F80900000000B7
61560+:107780003C1C0800279C59AC0A0008358FBF00102C
61561+:107790003C0208008C4259A80040F8090000000093
61562+:1077A0000A00083B000000003C0880008D0201B880
61563+:1077B0000440FFFE35090180AD2400003C031000A9
61564+:1077C00024040040AD250004A1240008A1260009DE
61565+:1077D000A527000A03E00008AD0301B83084FFFFCD
61566+:1077E0000080382130A5FFFF000020210A00084555
61567+:1077F000240600803087FFFF8CA400002406003898
61568+:107800000A000845000028218F8300788F860070C9
61569+:107810001066000B008040213C07080024E75B68ED
61570+:10782000000328C000A710218C440000246300013D
61571+:10783000108800053063000F5466FFFA000328C06B
61572+:1078400003E00008000010213C07080024E75B6CFF
61573+:1078500000A7302103E000088CC200003C03900028
61574+:1078600034620001008220253C038000AC640020CB
61575+:107870008C65002004A0FFFE0000000003E000086B
61576+:10788000000000003C0280003443000100832025FA
61577+:1078900003E00008AC44002027BDFFE0AFB10014B6
61578+:1078A0003091FFFFAFB00010AFBF001812200013DF
61579+:1078B00000A080218CA20000240400022406020003
61580+:1078C0001040000F004028210E0007250000000096
61581+:1078D00000001021AE000000022038218FBF0018E8
61582+:1078E0008FB100148FB0001000402021000028212B
61583+:1078F000000030210A00084527BD00208CA20000AE
61584+:10790000022038218FBF00188FB100148FB00010F3
61585+:107910000040202100002821000030210A000845F5
61586+:1079200027BD002000A010213087FFFF8CA5000498
61587+:107930008C4400000A000845240600068F83FD9C45
61588+:1079400027BDFFE8AFBF0014AFB00010906700087C
61589+:10795000008010210080282130E600400000202116
61590+:1079600010C000088C5000000E0000BD0200202155
61591+:10797000020020218FBF00148FB000100A000548BC
61592+:1079800027BD00180E0008A4000000000E0000BD76
61593+:1079900002002021020020218FBF00148FB00010B0
61594+:1079A0000A00054827BD001827BDFFE0AFB0001052
61595+:1079B0008F90FD9CAFBF001CAFB20018AFB1001498
61596+:1079C00092060001008088210E00087230D2000467
61597+:1079D00092040005001129C2A6050000348300406E
61598+:1079E000A20300050E00087C022020210E00054A9B
61599+:1079F0000220202124020001AE02000C02202821D6
61600+:107A0000A602001024040002A602001224060200AE
61601+:107A1000A60200140E000725A60200161640000F4D
61602+:107A20008FBF001C978C00743C0B08008D6B007896
61603+:107A30002588FFFF3109FFFF256A0001012A382B45
61604+:107A400010E00006A78800743C0F6006240E0016A4
61605+:107A500035ED0010ADAE00508FBF001C8FB2001886
61606+:107A60008FB100148FB0001003E0000827BD002084
61607+:107A700027BDFFE0AFB10014AFBF0018AFB00010DA
61608+:107A80001080000400A088212402008010820007DA
61609+:107A9000000000000000000D8FBF00188FB100141F
61610+:107AA0008FB0001003E0000827BD00200E00087210
61611+:107AB00000A020218F86FD9C0220202190C500057A
61612+:107AC0000E00087C30B000FF2403003E1603FFF1D7
61613+:107AD0003C0680008CC401780480FFFE34C801405D
61614+:107AE000240900073C071000AD11000002202021EE
61615+:107AF000A10900048FBF00188FB100148FB00010CF
61616+:107B0000ACC701780A0008C527BD002027BDFFE0EB
61617+:107B1000AFB00010AFBF0018AFB100143C10800030
61618+:107B20008E110020000000000E00054AAE04002067
61619+:107B3000AE1100208FBF00188FB100148FB000105D
61620+:107B400003E0000827BD00203084FFFF00803821BB
61621+:107B50002406003500A020210A0008450000282145
61622+:107B60003084FFFF008038212406003600A0202149
61623+:107B70000A0008450000282127BDFFD0AFB500242A
61624+:107B80003095FFFFAFB60028AFB40020AFBF002C88
61625+:107B9000AFB3001CAFB20018AFB10014AFB000100B
61626+:107BA00030B6FFFF12A000270000A0218F920058DE
61627+:107BB0008E4300003C0680002402004000033E0289
61628+:107BC00000032C0230E4007F006698241482001D1C
61629+:107BD00030A500FF8F8300682C68000A1100001098
61630+:107BE0008F8D0044000358803C0C0800258C57B84A
61631+:107BF000016C50218D4900000120000800000000A8
61632+:107C000002D4302130C5FFFF0E0008522404008446
61633+:107C1000166000028F920058AF8000688F8D00447C
61634+:107C20002659002026980001032090213314FFFFDD
61635+:107C300015A00004AF9900580295202B1480FFDC9A
61636+:107C400000000000028010218FBF002C8FB600289A
61637+:107C50008FB500248FB400208FB3001C8FB20018A2
61638+:107C60008FB100148FB0001003E0000827BD003072
61639+:107C70002407003414A70149000000009247000EB9
61640+:107C80008F9FFDA08F90FD9C24181600A3E700197C
61641+:107C90009242000D3C0880003C07800CA3E20018D3
61642+:107CA000964A00123C0D60003C117FFFA60A005C62
61643+:107CB000964400103623FFFF240200053099FFFF91
61644+:107CC000AE1900548E46001CAD1800288CEF000041
61645+:107CD0008DAE444801E6482601C93021AE06003881
61646+:107CE0008E05003824CB00013C0E7F00AE05003C21
61647+:107CF0008E0C003CAFEC0004AE0B00208E13002075
61648+:107D0000AE13001CA3E0001BAE03002CA3E2001284
61649+:107D10008E4A001424130050AE0A00348E0400343E
61650+:107D2000AFE400148E590018AE1900489258000CA8
61651+:107D3000A218004E920D000835AF0020A20F0008D7
61652+:107D40008E090018012E282434AC4000AE0C001817
61653+:107D5000920B0000317200FF1253027F2403FF8058
61654+:107D60003C04080024845BE80E0008AA0000000020
61655+:107D70003C1108008E315BE80E00087202202021C1
61656+:107D80002405000424080001A2050025022020216A
61657+:107D90000E00087CA20800053C0580008CB001782C
61658+:107DA0000600FFFE8F92005834AE0140240F0002FF
61659+:107DB0003C091000ADD10000A1CF0004ACA90178AE
61660+:107DC0000A000962AF8000682CAD003751A0FF9413
61661+:107DD0008F8D0044000580803C110800263157E05B
61662+:107DE000021178218DEE000001C0000800000000A3
61663+:107DF0002411000414B1008C3C0780003C080800EA
61664+:107E00008D085BE88F86FD9CACE800208E4500085D
61665+:107E10008F99FDA0240D0050ACC500308E4C000899
61666+:107E2000ACCC00508E4B000CACCB00348E43001019
61667+:107E3000ACC300388E4A0010ACCA00548E42001405
61668+:107E4000ACC2003C8E5F0018AF3F00048E50001C97
61669+:107E5000ACD0002090C40000309800FF130D024AFF
61670+:107E6000000000008CC400348CD00030009030231F
61671+:107E700004C000F12404008C126000EE2402000310
61672+:107E80000A000962AF8200682419000514B900666F
61673+:107E90003C0580003C0808008D085BE88F86FD9C4F
61674+:107EA000ACA800208E4C00048F8AFDA0240720007F
61675+:107EB000ACCC001C924B000824120008A14B001906
61676+:107EC0008F82005890430009A14300188F85005805
61677+:107ED00090BF000A33E400FF1092001028890009C7
61678+:107EE000152000BA240E0002240D0020108D000B76
61679+:107EF000340780002898002117000008240740005C
61680+:107F000024100040109000053C0700012419008057
61681+:107F1000109900023C070002240740008CC20018A0
61682+:107F20003C03FF00004350240147F825ACDF001854
61683+:107F300090B2000BA0D200278F8300589464000CED
61684+:107F4000108001FE000000009467000C3C1F8000C0
61685+:107F50002405FFBFA4C7005C9063000E2407000443
61686+:107F6000A0C300088F820058904A000FA0CA0009E1
61687+:107F70008F8900588D3200108FE400740244C823AA
61688+:107F8000ACD900588D300014ACD0002C95380018B6
61689+:107F9000330DFFFFACCD00409531001A322FFFFFAB
61690+:107FA000ACCF00448D2E001CACCE00489128000EB2
61691+:107FB000A0C8000890CC000801855824126001B6C2
61692+:107FC000A0CB00088F9200580A000962AF870068B2
61693+:107FD0002406000614A600143C0E80003C0F080086
61694+:107FE0008DEF5BE88F85FD98ADCF00208E4900189E
61695+:107FF0008F86FD9C8F8BFDA0ACA900008CC800383B
61696+:1080000024040005ACA800048CCC003C1260008164
61697+:10801000AD6C00000A000962AF84006824110007FB
61698+:1080200010B1004B240400063C05080024A55BE8C1
61699+:108030000E000881240400818F9200580013102B39
61700+:108040000A000962AF820068241F002314BFFFF6F4
61701+:108050003C0C80003C0508008CA55BE88F8BFDA0E4
61702+:10806000AD8500208F91FD9C8E4600042564002084
61703+:1080700026450014AE260028240600030E000F81BA
61704+:10808000257000308F87005802002021240600034D
61705+:108090000E000F8124E500083C04080024845BE8FE
61706+:1080A0000E0008AA0000000092230000240A0050DD
61707+:1080B000306200FF544AFFE18F9200580E000F6CAF
61708+:1080C000000000000A000A6A8F920058240800335A
61709+:1080D00014A800323C0380003C1108008E315BE89C
61710+:1080E0008F8FFDA0AC7100208E420008240D002867
61711+:1080F0008F89FD9CADE200308E4A000C24060009F9
61712+:10810000ADEA00348E5F0010ADFF00388E440014DD
61713+:10811000ADE400208E590018ADF900248E58001CE3
61714+:10812000ADF80028A1ED00118E4E00041260003160
61715+:10813000AD2E00288F9200580A000962AF860068B1
61716+:10814000240D002214ADFFB8000000002404000735
61717+:108150003C1008008E105BE83C188000AF10002037
61718+:108160005660FEAEAF8400683C04080024845BE8DF
61719+:108170000E0008AA241300508F84FD9C90920000EA
61720+:10818000325900FF1333014B000000008F9200585A
61721+:10819000000020210A000962AF8400683C05080045
61722+:1081A00024A55BE80E000858240400810A000A6A2E
61723+:1081B0008F92005802D498213265FFFF0E000852BA
61724+:1081C000240400840A0009628F920058108EFF5325
61725+:1081D000240704002887000310E00179241100041B
61726+:1081E000240F0001548FFF4D240740000A000A228B
61727+:1081F000240701003C05080024A55BE80E0008A444
61728+:10820000240400828F920058000030210A00096285
61729+:10821000AF8600683C04080024845BE88CC2003808
61730+:108220000E0008AA8CC3003C8F9200580A000AC0B6
61731+:1082300000002021240400823C05080024A55BE8FE
61732+:108240000E0008A4000000008F92005800001021CA
61733+:108250000A000962AF8200688E5000048F91FD9C75
61734+:108260003C078000ACF00020922C00050200282181
61735+:10827000318B0002156001562404008A8F92FDA004
61736+:108280002404008D9245001B30A6002014C001502C
61737+:1082900002002821922E00092408001231C900FF93
61738+:1082A0001128014B240400810E00087202002021D5
61739+:1082B0009258001B240F000402002021370D0042B9
61740+:1082C000A24D001B0E00087CA22F00253C0580005B
61741+:1082D0008CA401780480FFFE34B90140241F000201
61742+:1082E000AF300000A33F00048F9200583C101000F4
61743+:1082F000ACB001780A000A6B0013102B8E500004FA
61744+:108300008F91FD9C3C038000AC700020922A0005F8
61745+:108310000200282131420002144000172404008A80
61746+:10832000922C00092412000402002821318B00FF46
61747+:1083300011720011240400810E0008720200202135
61748+:108340008F89FDA0240800122405FFFE912F001B39
61749+:108350000200202135EE0020A12E001BA2280009DA
61750+:108360009226000500C538240E00087CA2270005CF
61751+:1083700002002821000020210E0009330000000027
61752+:108380000A000A6A8F9200588E4C00043C07800055
61753+:108390003C10080026105BE8ACEC00203C01080013
61754+:1083A000AC2C5BE8924B0003317100041220013BBE
61755+:1083B0008F84FD9C24020006A0820009924F001BBE
61756+:1083C000240EFFC031E9003F012E4025A08800089F
61757+:1083D0009245000330A6000114C0013200000000E5
61758+:1083E0008E420008AE0200083C0208008C425BF09E
61759+:1083F000104001318F90FDA0000219C28F8DFD9CAD
61760+:10840000A603000C8E4A000C24180001240400145A
61761+:10841000AE0A002C8E420010AE02001C965F0016C1
61762+:10842000A61F003C96590014A619003EADB8000CDA
61763+:10843000A5B80010A5B80012A5B80014A5B800167C
61764+:1084400012600144A2040011925100033232000272
61765+:108450002E5300018F920058266200080A0009621C
61766+:10846000AF8200688E4400043C1980003C068008FE
61767+:10847000AF2400208E45000890D80000240D005045
61768+:10848000331100FF122D009C2407008824060009E8
61769+:108490000E000845000000000A000A6A8F9200588A
61770+:1084A0008E5000043C0980003C118008AD30002053
61771+:1084B0009228000024050050310400FF10850110AF
61772+:1084C0002407008802002021000028210E00084512
61773+:1084D0002406000E922D00002418FF80020028219F
61774+:1084E00001B8802524040004240600300E0007256E
61775+:1084F000A23000000A000A6A8F9200588E500004D1
61776+:108500008F91FDA03C028000AC500020923F001BE8
61777+:1085100033F900101320006C240700810200202191
61778+:10852000000028212406001F0E000845000000005E
61779+:108530000A000A6A8F9200588E44001C0E00085DE3
61780+:1085400000000000104000E3004048218F880058E0
61781+:1085500024070089012020218D05001C240600012C
61782+:108560000E000845000000000A000A6A8F920058B9
61783+:10857000964900023C10080026105BE831280004F0
61784+:10858000110000973C0460008E4E001C3C0F8000E0
61785+:10859000ADEE00203C010800AC2E5BE896470002DF
61786+:1085A00030E40001148000E6000000008E42000468
61787+:1085B000AE0200083C1008008E105BF0120000ECC8
61788+:1085C0003C0F80008F92FD9C241000018E4E0018FD
61789+:1085D0008F8DFDA08F9FFD9801CF4825AE490018D3
61790+:1085E000A2400005AE50000C3C0808008D085BF06E
61791+:1085F0008F840058A6500010000839C2A6500012FF
61792+:10860000A6500014A6500016A5A7000C8C8C0008DC
61793+:108610008F8B00588F8A0058ADAC002C8D63000CF6
61794+:1086200024070002ADA3001C91460010A1A6001172
61795+:108630008F82005890450011A3E500088F990058DB
61796+:1086400093380012A258004E8F910058922F0013B9
61797+:10865000A1AF00128F920058964E0014A5AE003CB8
61798+:1086600096490016A5A9003E8E480018ADA8001432
61799+:108670005660FD6AAF8700683C05080024A55BE8EA
61800+:108680000E000881000020218F9200580000382140
61801+:108690000A000962AF8700683C05080024A55BE872
61802+:1086A0000E0008A4240400828F9200580A000A4D8C
61803+:1086B000000038210E000F6C000000008F9200585F
61804+:1086C0000A000AC0000020210E00087202002021CA
61805+:1086D0009223001B02002021346A00100E00087C47
61806+:1086E000A22A001B000038210200202100002821BE
61807+:1086F0000A000BA52406001F9242000C305F000107
61808+:1087000013E0000300000000964A000EA4CA002CEB
61809+:10871000924B000C316300025060000600003821CB
61810+:108720008E470014964C0012ACC7001CA4CC001A53
61811+:10873000000038210A000B7F240600093C050800D0
61812+:1087400024A55BE80E0008A42404008B8F92005837
61813+:108750000A000A4D0013382B3C0C08008D8C5BE896
61814+:1087600024DFFFFE25930100326B007F016790211B
61815+:1087700002638824AD110028AE4600E0AE4000E45C
61816+:108780000A0009B3AE5F001CACC000543C0D0800E9
61817+:108790008DAD5BE83C18800C37090100ACED00287A
61818+:1087A0008E510014AD3100E08E4F0014AD2F00E467
61819+:1087B0008E4E001025C7FFFE0A0009F4AD27001CED
61820+:1087C0005491FDD6240740000A000A222407100015
61821+:1087D0000E00092D000000000A000A6A8F9200585E
61822+:1087E0008C83442C3C12DEAD3651BEEF3C010800B8
61823+:1087F000AC205BE810710062000000003C196C6264
61824+:1088000037387970147800082404000297850074C2
61825+:108810009782006C2404009200A2F82B13E0001948
61826+:1088200002002821240400020E00069524050200FF
61827+:108830003C068000ACC200203C010800AC225BE892
61828+:108840001040000D8F8C0058240A002824040003D7
61829+:10885000918B0010316300FF546A00012404000171
61830+:108860000E0000810000000010400004240400837A
61831+:108870000A000BC28F920058240400833C050800B4
61832+:1088800024A55BE80E000881000000008F920058CC
61833+:108890000013382B0A000962AF8700680A000B49F1
61834+:1088A000240200128E4400080E00085D0000000043
61835+:1088B0000A000B55AE0200083C05080024A55BE841
61836+:1088C0000E000858240400878F9200580A000B728B
61837+:1088D0000013102B240400040E000695240500301C
61838+:1088E0001440002A004048218F8800582407008344
61839+:1088F000012020218D05001C0A000BB32406000175
61840+:108900008F8300788F8600701066FEEE000038219D
61841+:108910003C07080024E75B6C000320C00087282187
61842+:108920008CAE000011D0005D246F000131E3000F18
61843+:108930005466FFFA000320C00A000B8C00003821A7
61844+:108940008E4400040E00085D000000000A000BC801
61845+:10895000AE0200083C05080024A55BE80E0008A450
61846+:10896000240400828F9200580A000B72000010212C
61847+:108970003C05080024A55BE80A000C7C2404008761
61848+:108980008C83442C0A000C5B3C196C628F88005865
61849+:108990003C0780083C0C8000240B0050240A000196
61850+:1089A000AD820020A0EB0000A0EA000191030004CA
61851+:1089B000A0E3001891040005A0E400199106000648
61852+:1089C0003C04080024845B6CA0E6001A91020007B6
61853+:1089D0003C06080024C65B68A0E2001B9105000865
61854+:1089E000A0E5001C911F0009A0FF001D9119000ABD
61855+:1089F000A0F9001E9118000BA0F8001F9112000CA6
61856+:108A0000A0F200209111000DA0F100219110000EA4
61857+:108A1000A0F00022910F000FA0EF0023910E001094
61858+:108A2000A0EE0024910D0011A0ED0025950C00147E
61859+:108A3000A4EC0028950B00168F8A00708F920078A6
61860+:108A4000A4EB002A95030018000A10C02545000178
61861+:108A5000A4E3002C8D1F001C0044C0210046C82147
61862+:108A600030A5000FAF3F0000AF09000010B20006B4
61863+:108A7000AF850070000038218D05001C01202021E9
61864+:108A80000A000BB32406000124AD000131A7000F3A
61865+:108A9000AF8700780A000CF9000038213C06080076
61866+:108AA00024C65B680086902100003821ACA000003D
61867+:108AB0000A000B8CAE4000003C0482013C036000C5
61868+:108AC00034820E02AC603D68AF80009803E000087D
61869+:108AD000AC623D6C27BDFFE8AFB000103090FFFFE7
61870+:108AE000001018422C620041AFBF00141440000275
61871+:108AF00024040080240300403C010800AC300060E6
61872+:108B00003C010800AC2300640E000F7500602821B2
61873+:108B1000244802BF2409FF8001092824001039805D
61874+:108B2000001030408FBF00148FB0001000A720212C
61875+:108B300000861821AF8300803C010800AC25005856
61876+:108B40003C010800AC24005C03E0000827BD0018CD
61877+:108B5000308300FF30C6FFFF30E400FF3C08800098
61878+:108B60008D0201B80440FFFE000354000144382583
61879+:108B70003C09600000E920253C031000AD050180A0
61880+:108B8000AD060184AD04018803E00008AD0301B81F
61881+:108B90008F8500583C0A6012354800108CAC0004E8
61882+:108BA0003C0D600E35A60010318B00062D690001CA
61883+:108BB000AD0900C48CA70004ACC731808CA20008AA
61884+:108BC00094A40002ACC231848CA3001C0460000396
61885+:108BD000A784009003E00008000000008CAF00189C
61886+:108BE000ACCF31D08CAE001C03E00008ACCE31D449
61887+:108BF0008F8500588F87FF288F86FF308CAE00044A
61888+:108C00003C0F601235E80010ACEE00788CAD000827
61889+:108C1000ACED007C8CAC0010ACCC004C8CAB000CF0
61890+:108C2000ACCB004894CA00543C0208008C4200447B
61891+:108C300025490001A4C9005494C400543083FFFFA7
61892+:108C400010620017000000003C0208008C42004047
61893+:108C5000A4C200528CA30018ACE300308CA2001414
61894+:108C6000ACE2002C8CB90018ACF900388CB80014B8
61895+:108C700024050001ACF800348D0600BC50C5001975
61896+:108C80008D0200B48D0200B8A4E2004894E40048CC
61897+:108C9000A4E4004A94E800EA03E000083102FFFF80
61898+:108CA0003C0208008C420024A4C00054A4C200521C
61899+:108CB0008CA30018ACE300308CA20014ACE2002CB2
61900+:108CC0008CB90018ACF900388CB8001424050001E8
61901+:108CD000ACF800348D0600BC54C5FFEB8D0200B823
61902+:108CE0008D0200B4A4E2004894E40048A4E4004AE1
61903+:108CF00094E800EA03E000083102FFFF8F86005885
61904+:108D00003C0480008CC900088CC80008000929C0F8
61905+:108D1000000839C0AC87002090C30007306200040F
61906+:108D20001040003EAF85009490CB0007316A0008E8
61907+:108D30001140003D8F87FF2C8CCD000C8CCE001491
61908+:108D400001AE602B11800036000000008CC2000CC8
61909+:108D5000ACE200708CCB00188F85FF288F88FF3025
61910+:108D6000ACEB00748CCA00102402FFF8ACAA00D847
61911+:108D70008CC9000CAD0900608CC4001CACA400D0F0
61912+:108D800090E3007C0062C824A0F9007C90D8000722
61913+:108D9000330F000811E000040000000090ED007C9B
61914+:108DA00035AC0001A0EC007C90CF000731EE000153
61915+:108DB00011C000060000000090E3007C241800347D
61916+:108DC00034790002A0F9007CACB800DC90C2000746
61917+:108DD0003046000210C000040000000090E8007C53
61918+:108DE00035040004A0E4007C90ED007D3C0B600E97
61919+:108DF000356A001031AC003FA0EC007D8D4931D4C4
61920+:108E00003127000110E00002240E0001A0AE00098D
61921+:108E100094AF00EA03E0000831E2FFFF8F87FF2CE8
61922+:108E20000A000DAF8CC200140A000DB0ACE0007057
61923+:108E30008F8C005827BDFFD8AFB3001CAFB200180D
61924+:108E4000AFB00010AFBF0020AFB10014918F00157C
61925+:108E50003C13600E3673001031EB000FA38B009CA7
61926+:108E60008D8F00048D8B0008959F0012959900103E
61927+:108E70009584001A9598001E958E001C33EDFFFF17
61928+:108E8000332AFFFF3089FFFF3308FFFF31C7FFFFA1
61929+:108E90003C010800AC2D00243C010800AC29004432
61930+:108EA0003C010800AC2A0040AE683178AE67317CE6
61931+:108EB00091850015959100163C12601236520010F3
61932+:108EC00030A200FF3230FFFFAE623188AE5000B4F6
61933+:108ED00091830014959F0018240600010066C804C1
61934+:108EE00033F8FFFFAE5900B8AE5800BC918E0014A5
61935+:108EF000AF8F00843C08600631CD00FFAE4D00C04E
61936+:108F0000918A00159584000E3C07600A314900FFE4
61937+:108F1000AF8B00883084FFFFAE4900C835110010C8
61938+:108F20000E000D1034F004103C0208008C4200606A
61939+:108F30003C0308008C6300643C0608008CC60058A3
61940+:108F40003C0508008CA5005C8F8400808FBF00204A
61941+:108F5000AE23004CAE65319CAE030054AE4500DC40
61942+:108F6000AE6231A0AE6331A4AE663198AE22004845
61943+:108F70008FB3001CAE0200508FB10014AE4200E06F
61944+:108F8000AE4300E4AE4600D88FB000108FB2001898
61945+:108F90000A00057D27BD0028978500929783007CF5
61946+:108FA00027BDFFE8AFB0001000A3102BAFBF001427
61947+:108FB000240400058F900058104000552409000239
61948+:108FC0000E0006958F850080AF8200942404000374
61949+:108FD0001040004F240900023C0680000E00008172
61950+:108FE000ACC2002024070001240820001040004DDE
61951+:108FF00024040005978E00928F8AFF2C24090050CC
61952+:1090000025C50001A7850092A14900003C0D08007C
61953+:109010008DAD0064240380008F84FF28000D66005E
61954+:10902000AD4C0018A5400006954B000A8F85FF3017
61955+:109030002402FF8001633024A546000A915F000AE4
61956+:109040000000482103E2C825A159000AA0A0000899
61957+:10905000A140004CA08000D5961800029783009094
61958+:109060003C020004A49800EA960F00022418FFBFF7
61959+:1090700025EE2401A48E00BE8E0D0004ACAD00448C
61960+:109080008E0C0008ACAC0040A4A00050A4A000547A
61961+:109090008E0B000C240C0030AC8B00288E060010C8
61962+:1090A000AC860024A480003EA487004EA487005014
61963+:1090B000A483003CAD420074AC8800D8ACA800602A
61964+:1090C000A08700FC909F00D433F9007FA09900D4C2
61965+:1090D000909000D402187824A08F00D4914E007C88
61966+:1090E00035CD0001A14D007C938B009CAD480070F4
61967+:1090F000AC8C00DCA08B00D68F8800888F87008422
61968+:10910000AC8800C4AC8700C8A5400078A540007AB0
61969+:109110008FBF00148FB000100120102103E0000861
61970+:1091200027BD00188F8500940E0007258F860080CC
61971+:109130000A000E9F2409000227BDFFE0AFB0001017
61972+:109140008F900058AFB10014AFBF00188E09000413
61973+:109150000E00054A000921C08E0800048F84FF28F4
61974+:109160008F82FF30000839C03C068000ACC7002069
61975+:10917000948500EA904300131460001C30B1FFFF97
61976+:109180008F8CFF2C918B0008316A00401540000B3A
61977+:10919000000000008E0D0004022030218FBF001857
61978+:1091A0008FB100148FB00010240400220000382179
61979+:1091B000000D29C00A000D2F27BD00200E000098C9
61980+:1091C000000000008E0D0004022030218FBF001827
61981+:1091D0008FB100148FB00010240400220000382149
61982+:1091E000000D29C00A000D2F27BD00200E000090A1
61983+:1091F000000000008E0D0004022030218FBF0018F7
61984+:109200008FB100148FB00010240400220000382118
61985+:10921000000D29C00A000D2F27BD002027BDFFE04B
61986+:10922000AFB200183092FFFFAFB00010AFBF001C0C
61987+:10923000AFB100141240001E000080218F8600583C
61988+:109240008CC500002403000600053F02000514023F
61989+:1092500030E4000714830016304500FF2CA80006F8
61990+:1092600011000040000558803C0C0800258C58BCBB
61991+:10927000016C50218D490000012000080000000011
61992+:109280008F8E0098240D000111CD005024020002A1
61993+:10929000AF820098260900013130FFFF24C800206A
61994+:1092A0000212202B010030211480FFE5AF88005806
61995+:1092B000020010218FBF001C8FB200188FB1001464
61996+:1092C0008FB0001003E0000827BD00209387007EC8
61997+:1092D00054E00034000030210E000DE700000000D3
61998+:1092E0008F8600580A000EFF240200018F87009825
61999+:1092F0002405000210E50031240400130000282199
62000+:1093000000003021240700010E000D2F0000000096
62001+:109310000A000F008F8600588F83009824020002F5
62002+:109320001462FFF6240400120E000D9A00000000E3
62003+:109330008F85009400403021240400120E000D2F70
62004+:10934000000038210A000F008F8600588F83009894
62005+:109350002411000310710029241F0002107FFFCE8A
62006+:1093600026090001240400100000282100003021FB
62007+:109370000A000F1D240700018F91009824060002A7
62008+:109380001626FFF9240400100E000E410000000014
62009+:10939000144000238F9800588F8600580A000EFF53
62010+:1093A00024020003240400140E000D2F00002821C5
62011+:1093B0008F8600580A000EFF240200020E000EA93C
62012+:1093C000000000000A000F008F8600580E000D3FBD
62013+:1093D00000000000241900022404001400002821C9
62014+:1093E0000000302100003821AF9900980E000D2FA9
62015+:1093F000000000000A000F008F8600580E000D5775
62016+:10940000000000008F8500942419000200403021E4
62017+:1094100024040010000038210A000F56AF9900986C
62018+:109420000040382124040010970F0002000028217A
62019+:109430000E000D2F31E6FFFF8F8600580A000F0047
62020+:10944000AF9100988F84FF2C3C077FFF34E6FFFF2D
62021+:109450008C8500182402000100A61824AC83001893
62022+:1094600003E00008A08200053084FFFF30A5FFFF65
62023+:109470001080000700001821308200011040000217
62024+:1094800000042042006518211480FFFB00052840DD
62025+:1094900003E000080060102110C000070000000079
62026+:1094A0008CA2000024C6FFFF24A50004AC820000AB
62027+:1094B00014C0FFFB2484000403E000080000000047
62028+:1094C00010A0000824A3FFFFAC86000000000000ED
62029+:1094D000000000002402FFFF2463FFFF1462FFFA74
62030+:1094E0002484000403E0000800000000000411C010
62031+:1094F00003E000082442024027BDFFE8AFB000109F
62032+:1095000000808021AFBF00140E000F9600A0202124
62033+:1095100000504821240AFF808FBF00148FB0001034
62034+:10952000012A30243127007F3C08800A3C042100B6
62035+:1095300000E8102100C428253C03800027BD001846
62036+:10954000AC650024AF820038AC400000AC6500245C
62037+:1095500003E00008AC4000403C0D08008DAD005811
62038+:1095600000056180240AFF8001A45821016C482174
62039+:10957000012A30243127007F3C08800C3C04210064
62040+:1095800000E8102100C428253C038000AC650028B9
62041+:10959000AF82003403E00008AC40002430A5FFFF98
62042+:1095A0003C0680008CC201B80440FFFE3C086015F8
62043+:1095B00000A838253C031000ACC40180ACC0018475
62044+:1095C000ACC7018803E00008ACC301B83C0D08003B
62045+:1095D0008DAD005800056180240AFF8001A4582148
62046+:1095E000016C4021010A4824000931403107007F05
62047+:1095F00000C728253C04200000A418253C02800058
62048+:10960000AC43083003E00008AF80003427BDFFE81A
62049+:10961000AFB0001000808021AFBF00140E000F9685
62050+:1096200000A0202100504821240BFF80012B502452
62051+:10963000000A39403128007F3C0620008FBF00140B
62052+:109640008FB0001000E8282534C2000100A21825C0
62053+:109650003C04800027BD0018AC83083003E00008FC
62054+:10966000AF8000383C0580088CA700603C0680086D
62055+:109670000087102B144000112C8340008CA8006040
62056+:109680002D0340001060000F240340008CC90060CF
62057+:109690000089282B14A00002008018218CC30060D0
62058+:1096A00000035A42000B30803C0A0800254A59202A
62059+:1096B00000CA202103E000088C8200001460FFF340
62060+:1096C0002403400000035A42000B30803C0A08008B
62061+:1096D000254A592000CA202103E000088C8200009E
62062+:1096E0003C05800890A60008938400AB24C20001CA
62063+:1096F000304200FF3043007F1064000C0002382726
62064+:10970000A0A200083C0480008C85017804A0FFFE24
62065+:109710008F8A00A0240900023C081000AC8A014096
62066+:10972000A089014403E00008AC8801780A00101BFE
62067+:1097300030E2008027BDFFD8AFB200188F9200A49E
62068+:10974000AFBF0020AFB3001CAFB00010AFB100142A
62069+:109750008F9300348E5900283C1000803C0EFFEFA0
62070+:10976000AE7900008E580024A260000A35CDFFFFBC
62071+:10977000AE7800049251002C3C0BFF9F356AFFFF2E
62072+:10978000A271000C8E6F000C3C080040A271000B0F
62073+:1097900001F06025018D4824012A382400E8302595
62074+:1097A000AE66000C8E450004AE6000183C0400FF5D
62075+:1097B000AE6500148E43002C3482FFFFA6600008C3
62076+:1097C0000062F824AE7F00108E5900088F9000A030
62077+:1097D000964E0012AE7900208E51000C31D83FFF1A
62078+:1097E00000187980AE7100248E4D001401F06021C4
62079+:1097F00031CB0001AE6D00288E4A0018000C41C22A
62080+:10980000000B4B80AE6A002C8E46001C01093821EB
62081+:10981000A667001CAE660030964500028E4400200C
62082+:10982000A665001EAE64003492430033306200042B
62083+:1098300054400006924700003C0280083443010077
62084+:109840008C7F00D0AE7F0030924700008F860038BA
62085+:10985000A0C700309245003330A4000250800007BA
62086+:10986000925100018F880038240BFF80910A00304C
62087+:10987000014B4825A1090030925100018F9000381A
62088+:10988000240CFFBF2404FFDFA21100318F8D0038AC
62089+:109890003C1880083711008091AF003C31EE007F0A
62090+:1098A000A1AE003C8F890038912B003C016C502404
62091+:1098B000A12A003C8F9F00388E68001493E6003C7C
62092+:1098C0002D0700010007114000C4282400A218251C
62093+:1098D000A3E3003C8F87003896590012A4F90032A8
62094+:1098E0008E450004922E007C30B0000300107823D7
62095+:1098F00031ED000300AD102131CC000215800002D3
62096+:1099000024460034244600303C0280083443008062
62097+:10991000907F007C00BFC824333800041700000289
62098+:1099200024C2000400C010218F98003824190002BE
62099+:10993000ACE20034A3190000924F003F8F8E003834
62100+:109940003C0C8008358B0080A1CF00018F9100383E
62101+:10995000924D003F8E440004A62D0002956A005CE3
62102+:109960000E000FF43150FFFF00024B800209382532
62103+:109970003C08420000E82825AE2500048E4400384B
62104+:109980008F850038ACA400188E460034ACA6001CAD
62105+:10999000ACA0000CACA00010A4A00014A4A0001661
62106+:1099A000A4A00020A4A00022ACA000248E62001479
62107+:1099B00050400001240200018FBF00208FB3001C23
62108+:1099C0008FB200188FB100148FB00010ACA2000845
62109+:1099D0000A00101327BD002827BDFFC83C058008DA
62110+:1099E00034A40080AFBF0034AFBE0030AFB7002C4E
62111+:1099F000AFB60028AFB50024AFB40020AFB3001C51
62112+:109A0000AFB20018AFB10014AFB00010948300786B
62113+:109A10009482007A104300512405FFFF0080F0215A
62114+:109A20000A0011230080B821108B004D8FBF003435
62115+:109A30008F8600A03C1808008F18005C2411FF805E
62116+:109A40003C1680000306782101F18024AED0002C62
62117+:109A500096EE007A31EC007F3C0D800E31CB7FFF1B
62118+:109A6000018D5021000B4840012AA82196A4000036
62119+:109A70003C0808008D0800582405FF8030953FFF02
62120+:109A800001061821001539800067C8210325F82434
62121+:109A90003C02010003E290253338007F3C11800C2A
62122+:109AA000AED20028031190219250000D320F000415
62123+:109AB00011E0003702E0982196E3007A96E8007AF8
62124+:109AC00096E5007A2404800031077FFF24E300013B
62125+:109AD00030627FFF00A4F82403E2C825A6F9007ACB
62126+:109AE00096E6007A3C1408008E94006030D67FFF22
62127+:109AF00012D400C1000000008E5800188F8400A00E
62128+:109B000002A028212713FFFF0E000FCEAE53002C1A
62129+:109B100097D5007897D4007A12950010000028217C
62130+:109B20003C098008352401003C0A8008914800085F
62131+:109B3000908700D53114007F30E400FF0284302B81
62132+:109B400014C0FFB9268B0001938E00AB268C000158
62133+:109B5000008E682115ACFFB78F8600A08FBF003440
62134+:109B60008FBE00308FB7002C8FB600288FB5002431
62135+:109B70008FB400208FB3001C8FB200188FB1001477
62136+:109B80008FB0001000A0102103E0000827BD0038AE
62137+:109B900000C020210E000F99028028218E4B00105A
62138+:109BA0008E4C00308F84003824090002016C502351
62139+:109BB000AE4A0010A089000096E3005C8E4400309D
62140+:109BC0008F9100380E000FF43070FFFF00024380C9
62141+:109BD000020838253C02420000E22825AE25000498
62142+:109BE0008E5F00048F8A00388E590000240B000815
62143+:109BF000AD5F001CAD590018AD40000CAD40001029
62144+:109C00009246000A240400052408C00030D000FF5A
62145+:109C1000A550001496580008A55800169251000A45
62146+:109C20003C188008322F00FFA54F0020964E0008F8
62147+:109C300037110100A54E0022AD400024924D000BCB
62148+:109C400031AC00FFA54C0002A14B00018E49003051
62149+:109C50008F830038240BFFBFAC690008A06400307C
62150+:109C60008F9000382403FFDF9607003200E8282495
62151+:109C700000B51025A6020032921F003233F9003FD2
62152+:109C800037260040A20600328F8C0038AD800034A9
62153+:109C90008E2F00D0AD8F0038918E003C3C0F7FFF9F
62154+:109CA00031CD007FA18D003C8F84003835EEFFFF61
62155+:109CB000908A003C014B4824A089003C8F850038E5
62156+:109CC00090A8003C01033824A0A7003C8E42003439
62157+:109CD0008F9100383C038008AE2200408E59002C42
62158+:109CE0008E5F0030033F3023AE26004492300048A0
62159+:109CF0003218007FA23800488F8800388E4D00301F
62160+:109D00008D0C004801AE582401965024014B482583
62161+:109D1000AD0900489244000AA104004C964700088F
62162+:109D20008F850038A4A7004E8E5000308E4400303E
62163+:109D30000E0003818C65006092F9007C0002F940FE
62164+:109D4000004028210002110003E2302133360002D6
62165+:109D500012C00003020680210005B0800216802197
62166+:109D6000926D007C31B30004126000020005708027
62167+:109D7000020E80218E4B00308F8800382405800031
62168+:109D8000316A0003000A4823312400030204182129
62169+:109D9000AD03003496E4007A96F0007A96F1007AEA
62170+:109DA00032027FFF2447000130FF7FFF0225C824D5
62171+:109DB000033F3025A6E6007A96F8007A3C120800A8
62172+:109DC0008E520060330F7FFF11F200180000000078
62173+:109DD0008F8400A00E000FCE02A028218F8400A047
62174+:109DE0000E000FDE028028210E001013000000007C
62175+:109DF0000A00111F0000000096F1007A022480245E
62176+:109E0000A6F0007A92EF007A92EB007A31EE00FF32
62177+:109E1000000E69C2000D6027000C51C03169007F3F
62178+:109E2000012A20250A001119A2E4007A96E6007A98
62179+:109E300000C5C024A6F8007A92EF007A92F3007A67
62180+:109E400031F200FF001271C2000E6827000DB1C090
62181+:109E5000326C007F01962825A2E5007A0A0011D015
62182+:109E60008F8400A03C0380003084FFFF30A5FFFFFB
62183+:109E7000AC640018AC65001C03E000088C620014A0
62184+:109E800027BDFFA03C068008AFBF005CAFBE0058F6
62185+:109E9000AFB70054AFB60050AFB5004CAFB40048F8
62186+:109EA000AFB30044AFB20040AFB1003CAFB0003838
62187+:109EB00034C80100910500D590C700083084FFFF29
62188+:109EC00030A500FF30E2007F0045182AAFA4001043
62189+:109ED000A7A00018A7A0002610600055AFA000148E
62190+:109EE00090CA00083149007F00A9302324D3FFFF26
62191+:109EF0000013802B8FB400100014902B02128824C2
62192+:109F0000522000888FB300143C03800894790052DB
62193+:109F1000947E00508FB60010033EC0230018BC0092
62194+:109F2000001714030016FC0002C2A82A16A00002A3
62195+:109F3000001F2C030040282100133C0000072403CD
62196+:109F400000A4102A5440000100A020212885000907
62197+:109F500014A000020080A021241400083C0C8008FA
62198+:109F60008D860048001459808D88004C3C03800089
62199+:109F70003169FFFF3C0A0010012A202534710400DA
62200+:109F8000AC660038AF9100A4AC68003CAC64003013
62201+:109F900000000000000000000000000000000000C1
62202+:109FA00000000000000000000000000000000000B1
62203+:109FB0008C6E000031CD002011A0FFFD0014782A26
62204+:109FC00001F01024104000390000A8213C16800840
62205+:109FD00092D700083C1280008E44010032F6007FC8
62206+:109FE0000E000F9902C028218E3900108E44010006
62207+:109FF0000000902133373FFF0E000FB102E028210F
62208+:10A00000923800003302003F2C500008520000102C
62209+:10A0100000008821000210803C030800246358E4FB
62210+:10A020000043F8218FFE000003C00008000000007C
62211+:10A0300090CF0008938C00AB31EE007F00AE682318
62212+:10A04000018D58210A0012172573FFFF0000882197
62213+:10A050003C1E80008FC401000E000FCE02E02821BC
62214+:10A060008FC401000E000FDE02C028211220000F55
62215+:10A070000013802B8F8B00A426A400010004AC00E9
62216+:10A08000027298230015AC032578004002B4B02A70
62217+:10A090000013802B241700010300882102D0102414
62218+:10A0A000AF9800A41440FFC9AFB700143C07800864
62219+:10A0B00094E200508FAE00103C05800002A288217F
62220+:10A0C0003C060020A4F10050ACA6003094F40050EF
62221+:10A0D00094EF005201D51823306CFFFF11F4001EDD
62222+:10A0E000AFAC00108CEF004C001561808CF500487F
62223+:10A0F00001EC28210000202100AC582B02A4C02133
62224+:10A10000030BB021ACE5004CACF600488FB4001056
62225+:10A110000014902B021288241620FF7C3C03800838
62226+:10A120008FB300148FBF005C8FBE00583A620001ED
62227+:10A130008FB700548FB600508FB5004C8FB40048D5
62228+:10A140008FB300448FB200408FB1003C8FB0003815
62229+:10A1500003E0000827BD006094FE00548CF2004428
62230+:10A1600033C9FFFE0009C8C00259F821ACBF003C4A
62231+:10A170008CE800448CAD003C010D50231940003B9D
62232+:10A18000000000008CF7004026E20001ACA200387D
62233+:10A190003C05005034A700103C038000AC67003041
62234+:10A1A00000000000000000000000000000000000AF
62235+:10A1B000000000000000000000000000000000009F
62236+:10A1C0008C7800003316002012C0FFFD3C1180087F
62237+:10A1D000962200543C1580003C068008304E000159
62238+:10A1E000000E18C0007578218DEC04003C070800B3
62239+:10A1F0008CE700443C040020ACCC00488DF40404FF
62240+:10A20000240B0001ACD4004C10EB0260AEA4003073
62241+:10A21000963900523C0508008CA5004000B99021F9
62242+:10A22000A6320052963F005427ED0001A62D00549F
62243+:10A230009626005430C4FFFF5487FF2F8FB40010C0
62244+:10A2400030A5FFFF0E0011F4A62000543C070800C3
62245+:10A250008CE70024963E00520047B82303D74823DA
62246+:10A26000A62900520A0012198FB400108CE2004097
62247+:10A270000A0012BE00000000922400012407000121
62248+:10A280003085007F14A7001C97AD00268E2B00148C
62249+:10A29000240CC000316A3FFF01AC48243C06080092
62250+:10A2A0008CC60060012A402531043FFF0086882BC0
62251+:10A2B00012200011A7A800263C0508008CA5005814
62252+:10A2C0008F9100A0000439802402FF8000B1182182
62253+:10A2D0000067F82103E2F02433F8007F3C1280008D
62254+:10A2E0003C19800EAE5E002C0319702191D0000D38
62255+:10A2F000360F0004A1CF000D0E001028241200011B
62256+:10A30000241100013C1E80008FC401000E000FCEFE
62257+:10A3100002E028218FC401000E000FDE02C02821B8
62258+:10A320001620FF558F8B00A40A0012860013802B85
62259+:10A330008F8600A490C80001310400201080019194
62260+:10A34000241000013C048008348B0080916A007C5A
62261+:10A350008F9E0034AFA0002C314900011120000F66
62262+:10A36000AFB000288CCD00148C8E006001AE602B45
62263+:10A370001580000201A038218C8700603C188008FD
62264+:10A38000370300808C70007000F0782B15E000021D
62265+:10A3900000E020218C640070AFA4002C3C028008F7
62266+:10A3A000344500808CD200148CBF0070025FC82B33
62267+:10A3B00017200002024020218CA400708FA7002CDF
62268+:10A3C0000087182310600003AFA3003024050002AB
62269+:10A3D000AFA500288FA400280264882B162000BA9D
62270+:10A3E000000018218CD000388FCE000C3C0F00806C
62271+:10A3F000AFD000008CCD00343C0CFF9F01CF58251E
62272+:10A40000AFCD000490CA003F3586FFFF01662024CF
62273+:10A410003C0900203C08FFEFA3CA000B0089382547
62274+:10A420003511FFFF00F118243C0500088F8700A4B8
62275+:10A430000065C825AFD9000C8CE20014AFC000182D
62276+:10A440008FA60030AFC200148CF800188FB0002C1B
62277+:10A450003C1FFFFBAFD8001C8CEF000837F2FFFF5A
62278+:10A4600003326824AFCF00248CEC000C020670216C
62279+:10A47000AFCD000CA7C00038A7C0003AAFCE002C6B
62280+:10A48000AFCC0020AFC000288CEA00148FAB002CAA
62281+:10A49000014B48230126402311000011AFC80010D2
62282+:10A4A00090EB003D8FC900048FC80000000B5100E5
62283+:10A4B000012A28210000102100AA882B010218215E
62284+:10A4C0000071F821AFC50004AFDF000090F2003D3D
62285+:10A4D000A3D2000A8F9900A497380006A7D80008D5
62286+:10A4E0008F910038240800023C038008A228000055
62287+:10A4F0003465008094BF005C8FA4002C33F0FFFF14
62288+:10A500000E000FF48F9200380002CB808F8500A4DC
62289+:10A51000021978253C18420001F87025AE4E00045F
62290+:10A520008F8400388CAD0038AC8D00188CAC0034B2
62291+:10A53000AC8C001CAC80000CAC800010A48000141B
62292+:10A54000A4800016A4800020A4800022AC800024F7
62293+:10A5500090A6003F8FA7002CA486000250E0019235
62294+:10A56000240700018FA200305040000290A2003D5D
62295+:10A5700090A2003E244A0001A08A00018F84003886
62296+:10A580008FA9002CAC8900083C128008364D008051
62297+:10A5900091AC007C3186000214C000022407003414
62298+:10A5A000240700308F8500A43C198008373F0080C5
62299+:10A5B00090B0000093F9007C240E0004A0900030BD
62300+:10A5C0008F8F00A48FB8002C8F8D003891F200017E
62301+:10A5D0003304000301C46023A1B200318F8E003820
62302+:10A5E0008F8600A42402C00095CA003294C90012CC
62303+:10A5F0008FAB002C0142402431233FFF010388250B
62304+:10A60000A5D1003291D000323185000300EBF82152
62305+:10A610003218003F370F0040A1CF00328FA4002C2A
62306+:10A6200003E5382133280004108000028F850038AC
62307+:10A6300000E838213C0A8008ACA700343549010005
62308+:10A640008D2800D08FA3002C2419FFBFACA80038A0
62309+:10A6500090B1003C2C640001240FFFDF3227007F03
62310+:10A66000A0A7003C8F98003800049140931F003C45
62311+:10A6700003F98024A310003C8F8C0038918E003C9D
62312+:10A6800001CF682401B23025A186003C8F8900A447
62313+:10A690008F8800388D2B0020AD0B00408D220024C8
62314+:10A6A000AD0200448D2A0028AD0A00488D23002CFD
62315+:10A6B0000E001013AD03004C8FB1002824070002D8
62316+:10A6C000122700118FA300280003282B00058023E8
62317+:10A6D0000270982400608021006090210A00126FAF
62318+:10A6E0000010882B962900128F8400A00000902172
62319+:10A6F0003125FFFFA7A900180E000FC22411000189
62320+:10A700000A00131D3C1E80003C0B80003C12800898
62321+:10A710008D640100924900088F92FF340E000F995A
62322+:10A720003125007F8F9900388FA700288FA4003033
62323+:10A73000A3270000965F005C33F0FFFF0E000FF4CC
62324+:10A740008F91003800026B80020D80253C0842008A
62325+:10A750008F8D00A402085025AE2A00048DA5003874
62326+:10A760008F8A003800007821000F1100AD450018D5
62327+:10A770008DB800343C047FFF3488FFFFAD58001CC7
62328+:10A7800091A6003E8D4C001C8D4900180006190052
62329+:10A79000000677020183C821004E58250323882B29
62330+:10A7A000012B382100F1F821AD59001CAD5F0018D4
62331+:10A7B000AD40000CAD40001091B0003E8FA40030C1
62332+:10A7C00024090005A550001495A500042419C00013
62333+:10A7D00000884024A545001691B8003EA5580020E9
62334+:10A7E00095AF0004A54F0022AD40002491AE003F7C
62335+:10A7F000A54E000291A6003E91AC003D01861023BB
62336+:10A80000244B0001A14B00018F9100388FA3003031
62337+:10A810003C028008344B0100AE230008A22900301E
62338+:10A820008F8C00388F8700A4959F003294F000121F
62339+:10A830002407FFBF033FC02432053FFF03057825EF
62340+:10A84000A58F0032918E00322418FFDF31CD003FFA
62341+:10A8500035A60040A18600328F910038240DFFFFFD
62342+:10A86000240CFF80AE2000348D6A00D0AE2A003860
62343+:10A870009223003C3069007FA229003C8F90003871
62344+:10A880003C0380009219003C0327F824A21F003CDF
62345+:10A890008F8E003891C5003C00B87824A1CF003CD1
62346+:10A8A0008F8A00383C0E8008AD4D00408FA6002CEA
62347+:10A8B000AD46004491420048004C5825A14B004849
62348+:10A8C0008F9000388F9900A48E09004801238824B6
62349+:10A8D00002283825AE070048933F003EA21F004CD7
62350+:10A8E0008F9800A48F8F003897050004A5E5004ECF
62351+:10A8F0000E0003818DC500609246007C8FAC003055
62352+:10A9000000026940000291000040282130CB000283
62353+:10A9100001B21021156000AA018230213C0E80088E
62354+:10A9200035C20080904C007C31830004106000032D
62355+:10A930008FB900300005788000CF3021241F00043B
62356+:10A940008F910038332D000303ED8023320800037C
62357+:10A9500000C85021AE2A00343C188000A7C500383A
62358+:10A960003C0680088F04010090DE00080E000FDE18
62359+:10A9700033C5007F0E001013000000000A00140D04
62360+:10A980008FA300288F9800348CC90038241F00033F
62361+:10A99000A7000008AF0900008CC50034A300000A1E
62362+:10A9A0008F9900A4AF0500043C080080932D003F60
62363+:10A9B000A31F000C8F0A000C3C02FF9FA30D000B8D
62364+:10A9C0000148F0253451FFFF3C12FFEF8F9900A49E
62365+:10A9D00003D170243646FFFF01C61824AF03000CD4
62366+:10A9E0008F2C0014972900128F8400A0AF0C001048
62367+:10A9F0008F2F0014AF000018AF000020AF0F00141D
62368+:10AA0000AF0000248F270018312F3FFF000F59801F
62369+:10AA1000AF0700288F2500080164F821312D0001BF
62370+:10AA2000AF0500308F31000C8F920038001F51C2EB
62371+:10AA3000000D438001481021241E00023C068008BE
62372+:10AA4000A702001CA7000034AF11002CA25E00007A
62373+:10AA500034D20080964E005C8F9900383C0342004F
62374+:10AA600031CCFFFF01833825AF2700048F8B00A472
62375+:10AA7000240500012402C0008D640038240700343E
62376+:10AA8000AF2400188D690034AF29001CAF20000CE2
62377+:10AA9000AF200010A7200014A7200016A720002038
62378+:10AAA000A7200022AF200024A7300002A325000128
62379+:10AAB0008F8800388F9F00A4AD10000893ED000030
62380+:10AAC000A10D00308F8A00A48F98003891510001A9
62381+:10AAD000A31100318F8B0038957E003203C27024A1
62382+:10AAE00001CF6025A56C0032916300323064003FD5
62383+:10AAF000A16400329249007C3125000214A00002BA
62384+:10AB00008F840038240700303C198008AC8700345B
62385+:10AB1000373201008E5F00D0240AFFBF020090216F
62386+:10AB2000AC9F0038908D003C31A8007FA088003C8D
62387+:10AB30008F9E003893C2003C004A8824A3D1003C79
62388+:10AB40008F8300380010882B9066003C34CE0020A4
62389+:10AB5000A06E003C8F8400A48F9800388C8C00205D
62390+:10AB6000AF0C00408C8F0024AF0F00448C8700286E
62391+:10AB7000AF0700488C8B002CAF0B004C0E0010135D
62392+:10AB80003C1E80000A0012700000000094C80052B1
62393+:10AB90003C0A08008D4A002401488821A4D10052B3
62394+:10ABA0000A0012198FB40010A08700018F840038AA
62395+:10ABB000240B0001AC8B00080A0013BE3C12800875
62396+:10ABC000000520800A0014A200C4302127BDFFE048
62397+:10ABD0003C0D8008AFB20018AFB00010AFBF001C32
62398+:10ABE000AFB1001435B200808E4C001835A80100BA
62399+:10ABF000964B000695A70050910900FC000C5602E8
62400+:10AC0000016728233143007F312600FF240200031F
62401+:10AC1000AF8300A8AF8400A010C2001B30B0FFFFBC
62402+:10AC2000910600FC2412000530C200FF10520033D0
62403+:10AC300000000000160000098FBF001C8FB2001832
62404+:10AC40008FB100148FB00010240D0C003C0C80005C
62405+:10AC500027BD002003E00008AD8D00240E0011FB8D
62406+:10AC6000020020218FBF001C8FB200188FB100148A
62407+:10AC70008FB00010240D0C003C0C800027BD00207C
62408+:10AC800003E00008AD8D0024965800789651007AB4
62409+:10AC9000924E007D0238782631E8FFFF31C400C0B3
62410+:10ACA000148000092D11000116000037000000007B
62411+:10ACB0005620FFE28FBF001C0E0010D100000000E4
62412+:10ACC0000A00156A8FBF001C1620FFDA0000000082
62413+:10ACD0000E0010D1000000001440FFD88FBF001CF0
62414+:10ACE0001600002200000000925F007D33E2003F6A
62415+:10ACF000A242007D0A00156A8FBF001C950900EA78
62416+:10AD00008F86008000802821240400050E0007257E
62417+:10AD10003130FFFF978300923C0480002465FFFFE1
62418+:10AD2000A78500928C8A01B80540FFFE0000000054
62419+:10AD3000AC8001808FBF001CAC9001848FB20018E2
62420+:10AD40008FB100148FB000103C0760133C0B100053
62421+:10AD5000240D0C003C0C800027BD0020AC8701882E
62422+:10AD6000AC8B01B803E00008AD8D00240E0011FB90
62423+:10AD7000020020215040FFB18FBF001C925F007D78
62424+:10AD80000A00159733E2003F0E0011FB020020215C
62425+:10AD90001440FFAA8FBF001C122000070000000013
62426+:10ADA0009259007D3330003F36020040A242007DC0
62427+:10ADB0000A00156A8FBF001C0E0010D100000000B1
62428+:10ADC0005040FF9E8FBF001C9259007D3330003FE2
62429+:10ADD0000A0015C636020040000000000000001BFB
62430+:10ADE0000000000F0000000A00000008000000063C
62431+:10ADF0000000000500000005000000040000000441
62432+:10AE00000000000300000003000000030000000336
62433+:10AE10000000000300000002000000020000000229
62434+:10AE2000000000020000000200000002000000021A
62435+:10AE3000000000020000000200000002000000020A
62436+:10AE400000000002000000020000000200000002FA
62437+:10AE50000000000100000001000000018008010066
62438+:10AE6000800800808008000000000C000000308096
62439+:10AE7000080011D00800127C08001294080012A8E3
62440+:10AE8000080012BC080011D0080011D0080012F010
62441+:10AE90000800132C080013400800138808001A8CBF
62442+:10AEA00008001A8C08001AC408001AC408001AD82E
62443+:10AEB00008001AA808001D0008001CCC08001D5836
62444+:10AEC00008001D5808001DE008001D108008024001
62445+:10AED000080027340800256C0800275C080027F4C8
62446+:10AEE0000800293C0800298808002AAC080029B479
62447+:10AEF00008002A38080025DC08002EDC08002EA4F3
62448+:10AF000008002588080025880800258808002B20CF
62449+:10AF100008002B20080025880800258808002DD06F
62450+:10AF2000080025880800258808002588080025884D
62451+:10AF300008002E0C080025880800258808002588B0
62452+:10AF4000080025880800258808002588080025882D
62453+:10AF5000080025880800258808002588080025881D
62454+:10AF6000080025880800258808002588080029A8E9
62455+:10AF7000080025880800258808002E680800258814
62456+:10AF800008002588080025880800258808002588ED
62457+:10AF900008002588080025880800258808002588DD
62458+:10AFA00008002588080025880800258808002588CD
62459+:10AFB00008002588080025880800258808002588BD
62460+:10AFC00008002CF4080025880800258808002C6853
62461+:10AFD00008002BC408003CE408003CB808003C848E
62462+:10AFE00008003C5808003C3808003BEC8008010091
62463+:10AFF00080080080800800008008008008004C6401
62464+:10B0000008004C9C08004BE408004C6408004C64A9
62465+:10B01000080049B808004C64080050500A000C842D
62466+:10B0200000000000000000000000000D7278703683
62467+:10B030002E322E31620000000602010300000000E3
62468+:10B0400000000001000000000000000000000000FF
62469+:10B0500000000000000000000000000000000000F0
62470+:10B0600000000000000000000000000000000000E0
62471+:10B0700000000000000000000000000000000000D0
62472+:10B0800000000000000000000000000000000000C0
62473+:10B0900000000000000000000000000000000000B0
62474+:10B0A00000000000000000000000000000000000A0
62475+:10B0B0000000000000000000000000000000000090
62476+:10B0C0000000000000000000000000000000000080
62477+:10B0D0000000000000000000000000000000000070
62478+:10B0E0000000000000000000000000000000000060
62479+:10B0F0000000000000000000000000000000000050
62480+:10B10000000000000000000000000000000000003F
62481+:10B11000000000000000000000000000000000002F
62482+:10B12000000000000000000000000000000000001F
62483+:10B13000000000000000000000000000000000000F
62484+:10B1400000000000000000000000000000000000FF
62485+:10B1500000000000000000000000000000000000EF
62486+:10B1600000000000000000000000000000000000DF
62487+:10B1700000000000000000000000000000000000CF
62488+:10B1800000000000000000000000000000000000BF
62489+:10B1900000000000000000000000000000000000AF
62490+:10B1A000000000000000000000000000000000009F
62491+:10B1B000000000000000000000000000000000008F
62492+:10B1C000000000000000000000000000000000007F
62493+:10B1D000000000000000000000000000000000006F
62494+:10B1E000000000000000000000000000000000005F
62495+:10B1F000000000000000000000000000000000004F
62496+:10B20000000000000000000000000000000000003E
62497+:10B21000000000000000000000000000000000002E
62498+:10B22000000000000000000000000000000000001E
62499+:10B23000000000000000000000000000000000000E
62500+:10B2400000000000000000000000000000000000FE
62501+:10B2500000000000000000000000000000000000EE
62502+:10B2600000000000000000000000000000000000DE
62503+:10B2700000000000000000000000000000000000CE
62504+:10B2800000000000000000000000000000000000BE
62505+:10B2900000000000000000000000000000000000AE
62506+:10B2A000000000000000000000000000000000009E
62507+:10B2B000000000000000000000000000000000008E
62508+:10B2C000000000000000000000000000000000007E
62509+:10B2D000000000000000000000000000000000006E
62510+:10B2E000000000000000000000000000000000005E
62511+:10B2F000000000000000000000000000000000004E
62512+:10B30000000000000000000000000000000000003D
62513+:10B31000000000000000000000000000000000002D
62514+:10B32000000000000000000000000000000000001D
62515+:10B33000000000000000000000000000000000000D
62516+:10B3400000000000000000000000000000000000FD
62517+:10B3500000000000000000000000000000000000ED
62518+:10B3600000000000000000000000000000000000DD
62519+:10B3700000000000000000000000000000000000CD
62520+:10B3800000000000000000000000000000000000BD
62521+:10B3900000000000000000000000000000000000AD
62522+:10B3A000000000000000000000000000000000009D
62523+:10B3B000000000000000000000000000000000008D
62524+:10B3C000000000000000000000000000000000007D
62525+:10B3D000000000000000000000000000000000006D
62526+:10B3E000000000000000000000000000000000005D
62527+:10B3F000000000000000000000000000000000004D
62528+:10B40000000000000000000000000000000000003C
62529+:10B41000000000000000000000000000000000002C
62530+:10B42000000000000000000000000000000000001C
62531+:10B43000000000000000000000000000000000000C
62532+:10B4400000000000000000000000000000000000FC
62533+:10B4500000000000000000000000000000000000EC
62534+:10B4600000000000000000000000000000000000DC
62535+:10B4700000000000000000000000000000000000CC
62536+:10B4800000000000000000000000000000000000BC
62537+:10B4900000000000000000000000000000000000AC
62538+:10B4A000000000000000000000000000000000009C
62539+:10B4B000000000000000000000000000000000008C
62540+:10B4C000000000000000000000000000000000007C
62541+:10B4D000000000000000000000000000000000006C
62542+:10B4E000000000000000000000000000000000005C
62543+:10B4F000000000000000000000000000000000004C
62544+:10B50000000000000000000000000000000000003B
62545+:10B51000000000000000000000000000000000002B
62546+:10B52000000000000000000000000000000000001B
62547+:10B53000000000000000000000000000000000000B
62548+:10B5400000000000000000000000000000000000FB
62549+:10B5500000000000000000000000000000000000EB
62550+:10B5600000000000000000000000000000000000DB
62551+:10B5700000000000000000000000000000000000CB
62552+:10B5800000000000000000000000000000000000BB
62553+:10B5900000000000000000000000000000000000AB
62554+:10B5A000000000000000000000000000000000009B
62555+:10B5B000000000000000000000000000000000008B
62556+:10B5C000000000000000000000000000000000007B
62557+:10B5D000000000000000000000000000000000006B
62558+:10B5E000000000000000000000000000000000005B
62559+:10B5F000000000000000000000000000000000004B
62560+:10B60000000000000000000000000000000000003A
62561+:10B61000000000000000000000000000000000002A
62562+:10B62000000000000000000000000000000000001A
62563+:10B63000000000000000000000000000000000000A
62564+:10B6400000000000000000000000000000000000FA
62565+:10B6500000000000000000000000000000000000EA
62566+:10B6600000000000000000000000000000000000DA
62567+:10B6700000000000000000000000000000000000CA
62568+:10B6800000000000000000000000000000000000BA
62569+:10B6900000000000000000000000000000000000AA
62570+:10B6A000000000000000000000000000000000009A
62571+:10B6B000000000000000000000000000000000008A
62572+:10B6C000000000000000000000000000000000007A
62573+:10B6D000000000000000000000000000000000006A
62574+:10B6E000000000000000000000000000000000005A
62575+:10B6F000000000000000000000000000000000004A
62576+:10B700000000000000000000000000000000000039
62577+:10B710000000000000000000000000000000000029
62578+:10B720000000000000000000000000000000000019
62579+:10B730000000000000000000000000000000000009
62580+:10B7400000000000000000000000000000000000F9
62581+:10B7500000000000000000000000000000000000E9
62582+:10B7600000000000000000000000000000000000D9
62583+:10B7700000000000000000000000000000000000C9
62584+:10B7800000000000000000000000000000000000B9
62585+:10B7900000000000000000000000000000000000A9
62586+:10B7A0000000000000000000000000000000000099
62587+:10B7B0000000000000000000000000000000000089
62588+:10B7C0000000000000000000000000000000000079
62589+:10B7D0000000000000000000000000000000000069
62590+:10B7E0000000000000000000000000000000000059
62591+:10B7F0000000000000000000000000000000000049
62592+:10B800000000000000000000000000000000000038
62593+:10B810000000000000000000000000000000000028
62594+:10B820000000000000000000000000000000000018
62595+:10B830000000000000000000000000000000000008
62596+:10B8400000000000000000000000000000000000F8
62597+:10B8500000000000000000000000000000000000E8
62598+:10B8600000000000000000000000000000000000D8
62599+:10B8700000000000000000000000000000000000C8
62600+:10B8800000000000000000000000000000000000B8
62601+:10B8900000000000000000000000000000000000A8
62602+:10B8A0000000000000000000000000000000000098
62603+:10B8B0000000000000000000000000000000000088
62604+:10B8C0000000000000000000000000000000000078
62605+:10B8D0000000000000000000000000000000000068
62606+:10B8E0000000000000000000000000000000000058
62607+:10B8F0000000000000000000000000000000000048
62608+:10B900000000000000000000000000000000000037
62609+:10B910000000000000000000000000000000000027
62610+:10B920000000000000000000000000000000000017
62611+:10B930000000000000000000000000000000000007
62612+:10B9400000000000000000000000000000000000F7
62613+:10B9500000000000000000000000000000000000E7
62614+:10B9600000000000000000000000000000000000D7
62615+:10B9700000000000000000000000000000000000C7
62616+:10B9800000000000000000000000000000000000B7
62617+:10B9900000000000000000000000000000000000A7
62618+:10B9A0000000000000000000000000000000000097
62619+:10B9B0000000000000000000000000000000000087
62620+:10B9C0000000000000000000000000000000000077
62621+:10B9D0000000000000000000000000000000000067
62622+:10B9E0000000000000000000000000000000000057
62623+:10B9F0000000000000000000000000000000000047
62624+:10BA00000000000000000000000000000000000036
62625+:10BA10000000000000000000000000000000000026
62626+:10BA20000000000000000000000000000000000016
62627+:10BA30000000000000000000000000000000000006
62628+:10BA400000000000000000000000000000000000F6
62629+:10BA500000000000000000000000000000000000E6
62630+:10BA600000000000000000000000000000000000D6
62631+:10BA700000000000000000000000000000000000C6
62632+:10BA800000000000000000000000000000000000B6
62633+:10BA900000000000000000000000000000000000A6
62634+:10BAA0000000000000000000000000000000000096
62635+:10BAB0000000000000000000000000000000000086
62636+:10BAC0000000000000000000000000000000000076
62637+:10BAD0000000000000000000000000000000000066
62638+:10BAE0000000000000000000000000000000000056
62639+:10BAF0000000000000000000000000000000000046
62640+:10BB00000000000000000000000000000000000035
62641+:10BB10000000000000000000000000000000000025
62642+:10BB20000000000000000000000000000000000015
62643+:10BB30000000000000000000000000000000000005
62644+:10BB400000000000000000000000000000000000F5
62645+:10BB500000000000000000000000000000000000E5
62646+:10BB600000000000000000000000000000000000D5
62647+:10BB700000000000000000000000000000000000C5
62648+:10BB800000000000000000000000000000000000B5
62649+:10BB900000000000000000000000000000000000A5
62650+:10BBA0000000000000000000000000000000000095
62651+:10BBB0000000000000000000000000000000000085
62652+:10BBC0000000000000000000000000000000000075
62653+:10BBD0000000000000000000000000000000000065
62654+:10BBE0000000000000000000000000000000000055
62655+:10BBF0000000000000000000000000000000000045
62656+:10BC00000000000000000000000000000000000034
62657+:10BC10000000000000000000000000000000000024
62658+:10BC20000000000000000000000000000000000014
62659+:10BC30000000000000000000000000000000000004
62660+:10BC400000000000000000000000000000000000F4
62661+:10BC500000000000000000000000000000000000E4
62662+:10BC600000000000000000000000000000000000D4
62663+:10BC700000000000000000000000000000000000C4
62664+:10BC800000000000000000000000000000000000B4
62665+:10BC900000000000000000000000000000000000A4
62666+:10BCA0000000000000000000000000000000000094
62667+:10BCB0000000000000000000000000000000000084
62668+:10BCC0000000000000000000000000000000000074
62669+:10BCD0000000000000000000000000000000000064
62670+:10BCE0000000000000000000000000000000000054
62671+:10BCF0000000000000000000000000000000000044
62672+:10BD00000000000000000000000000000000000033
62673+:10BD10000000000000000000000000000000000023
62674+:10BD20000000000000000000000000000000000013
62675+:10BD30000000000000000000000000000000000003
62676+:10BD400000000000000000000000000000000000F3
62677+:10BD500000000000000000000000000000000000E3
62678+:10BD600000000000000000000000000000000000D3
62679+:10BD700000000000000000000000000000000000C3
62680+:10BD800000000000000000000000000000000000B3
62681+:10BD900000000000000000000000000000000000A3
62682+:10BDA0000000000000000000000000000000000093
62683+:10BDB0000000000000000000000000000000000083
62684+:10BDC0000000000000000000000000000000000073
62685+:10BDD0000000000000000000000000000000000063
62686+:10BDE0000000000000000000000000000000000053
62687+:10BDF0000000000000000000000000000000000043
62688+:10BE00000000000000000000000000000000000032
62689+:10BE10000000000000000000000000000000000022
62690+:10BE20000000000000000000000000000000000012
62691+:10BE30000000000000000000000000000000000002
62692+:10BE400000000000000000000000000000000000F2
62693+:10BE500000000000000000000000000000000000E2
62694+:10BE600000000000000000000000000000000000D2
62695+:10BE700000000000000000000000000000000000C2
62696+:10BE800000000000000000000000000000000000B2
62697+:10BE900000000000000000000000000000000000A2
62698+:10BEA0000000000000000000000000000000000092
62699+:10BEB0000000000000000000000000000000000082
62700+:10BEC0000000000000000000000000000000000072
62701+:10BED0000000000000000000000000000000000062
62702+:10BEE0000000000000000000000000000000000052
62703+:10BEF0000000000000000000000000000000000042
62704+:10BF00000000000000000000000000000000000031
62705+:10BF10000000000000000000000000000000000021
62706+:10BF20000000000000000000000000000000000011
62707+:10BF30000000000000000000000000000000000001
62708+:10BF400000000000000000000000000000000000F1
62709+:10BF500000000000000000000000000000000000E1
62710+:10BF600000000000000000000000000000000000D1
62711+:10BF700000000000000000000000000000000000C1
62712+:10BF800000000000000000000000000000000000B1
62713+:10BF900000000000000000000000000000000000A1
62714+:10BFA0000000000000000000000000000000000091
62715+:10BFB0000000000000000000000000000000000081
62716+:10BFC0000000000000000000000000000000000071
62717+:10BFD0000000000000000000000000000000000061
62718+:10BFE0000000000000000000000000000000000051
62719+:10BFF0000000000000000000000000000000000041
62720+:10C000000000000000000000000000000000000030
62721+:10C010000000000000000000000000000000000020
62722+:10C020000000000000000000000000000000000010
62723+:10C030000000000000000000000000000000000000
62724+:10C0400000000000000000000000000000000000F0
62725+:10C0500000000000000000000000000000000000E0
62726+:10C0600000000000000000000000000000000000D0
62727+:10C0700000000000000000000000000000000000C0
62728+:10C0800000000000000000000000000000000000B0
62729+:10C0900000000000000000000000000000000000A0
62730+:10C0A0000000000000000000000000000000000090
62731+:10C0B0000000000000000000000000000000000080
62732+:10C0C0000000000000000000000000000000000070
62733+:10C0D0000000000000000000000000000000000060
62734+:10C0E0000000000000000000000000000000000050
62735+:10C0F0000000000000000000000000000000000040
62736+:10C10000000000000000000000000000000000002F
62737+:10C11000000000000000000000000000000000001F
62738+:10C12000000000000000000000000000000000000F
62739+:10C1300000000000000000000000000000000000FF
62740+:10C1400000000000000000000000000000000000EF
62741+:10C1500000000000000000000000000000000000DF
62742+:10C1600000000000000000000000000000000000CF
62743+:10C1700000000000000000000000000000000000BF
62744+:10C1800000000000000000000000000000000000AF
62745+:10C19000000000000000000000000000000000009F
62746+:10C1A000000000000000000000000000000000008F
62747+:10C1B000000000000000000000000000000000007F
62748+:10C1C000000000000000000000000000000000006F
62749+:10C1D000000000000000000000000000000000005F
62750+:10C1E000000000000000000000000000000000004F
62751+:10C1F000000000000000000000000000000000003F
62752+:10C20000000000000000000000000000000000002E
62753+:10C21000000000000000000000000000000000001E
62754+:10C22000000000000000000000000000000000000E
62755+:10C2300000000000000000000000000000000000FE
62756+:10C2400000000000000000000000000000000000EE
62757+:10C2500000000000000000000000000000000000DE
62758+:10C2600000000000000000000000000000000000CE
62759+:10C2700000000000000000000000000000000000BE
62760+:10C2800000000000000000000000000000000000AE
62761+:10C29000000000000000000000000000000000009E
62762+:10C2A000000000000000000000000000000000008E
62763+:10C2B000000000000000000000000000000000007E
62764+:10C2C000000000000000000000000000000000006E
62765+:10C2D000000000000000000000000000000000005E
62766+:10C2E000000000000000000000000000000000004E
62767+:10C2F000000000000000000000000000000000003E
62768+:10C30000000000000000000000000000000000002D
62769+:10C31000000000000000000000000000000000001D
62770+:10C32000000000000000000000000000000000000D
62771+:10C3300000000000000000000000000000000000FD
62772+:10C3400000000000000000000000000000000000ED
62773+:10C3500000000000000000000000000000000000DD
62774+:10C3600000000000000000000000000000000000CD
62775+:10C3700000000000000000000000000000000000BD
62776+:10C3800000000000000000000000000000000000AD
62777+:10C39000000000000000000000000000000000009D
62778+:10C3A000000000000000000000000000000000008D
62779+:10C3B000000000000000000000000000000000007D
62780+:10C3C000000000000000000000000000000000006D
62781+:10C3D000000000000000000000000000000000005D
62782+:10C3E000000000000000000000000000000000004D
62783+:10C3F000000000000000000000000000000000003D
62784+:10C40000000000000000000000000000000000002C
62785+:10C41000000000000000000000000000000000001C
62786+:10C42000000000000000000000000000000000000C
62787+:10C4300000000000000000000000000000000000FC
62788+:10C4400000000000000000000000000000000000EC
62789+:10C4500000000000000000000000000000000000DC
62790+:10C4600000000000000000000000000000000000CC
62791+:10C4700000000000000000000000000000000000BC
62792+:10C4800000000000000000000000000000000000AC
62793+:10C49000000000000000000000000000000000009C
62794+:10C4A000000000000000000000000000000000008C
62795+:10C4B000000000000000000000000000000000007C
62796+:10C4C000000000000000000000000000000000006C
62797+:10C4D000000000000000000000000000000000005C
62798+:10C4E000000000000000000000000000000000004C
62799+:10C4F000000000000000000000000000000000003C
62800+:10C50000000000000000000000000000000000002B
62801+:10C51000000000000000000000000000000000001B
62802+:10C52000000000000000000000000000000000000B
62803+:10C5300000000000000000000000000000000000FB
62804+:10C5400000000000000000000000000000000000EB
62805+:10C5500000000000000000000000000000000000DB
62806+:10C5600000000000000000000000000000000000CB
62807+:10C5700000000000000000000000000000000000BB
62808+:10C5800000000000000000000000000000000000AB
62809+:10C59000000000000000000000000000000000009B
62810+:10C5A000000000000000000000000000000000008B
62811+:10C5B000000000000000000000000000000000007B
62812+:10C5C000000000000000000000000000000000006B
62813+:10C5D000000000000000000000000000000000005B
62814+:10C5E000000000000000000000000000000000004B
62815+:10C5F000000000000000000000000000000000003B
62816+:10C60000000000000000000000000000000000002A
62817+:10C61000000000000000000000000000000000001A
62818+:10C62000000000000000000000000000000000000A
62819+:10C6300000000000000000000000000000000000FA
62820+:10C6400000000000000000000000000000000000EA
62821+:10C6500000000000000000000000000000000000DA
62822+:10C6600000000000000000000000000000000000CA
62823+:10C6700000000000000000000000000000000000BA
62824+:10C6800000000000000000000000000000000000AA
62825+:10C69000000000000000000000000000000000009A
62826+:10C6A000000000000000000000000000000000008A
62827+:10C6B000000000000000000000000000000000007A
62828+:10C6C000000000000000000000000000000000006A
62829+:10C6D000000000000000000000000000000000005A
62830+:10C6E000000000000000000000000000000000004A
62831+:10C6F000000000000000000000000000000000003A
62832+:10C700000000000000000000000000000000000029
62833+:10C710000000000000000000000000000000000019
62834+:10C720000000000000000000000000000000000009
62835+:10C7300000000000000000000000000000000000F9
62836+:10C7400000000000000000000000000000000000E9
62837+:10C7500000000000000000000000000000000000D9
62838+:10C7600000000000000000000000000000000000C9
62839+:10C7700000000000000000000000000000000000B9
62840+:10C7800000000000000000000000000000000000A9
62841+:10C790000000000000000000000000000000000099
62842+:10C7A0000000000000000000000000000000000089
62843+:10C7B0000000000000000000000000000000000079
62844+:10C7C0000000000000000000000000000000000069
62845+:10C7D0000000000000000000000000000000000059
62846+:10C7E0000000000000000000000000000000000049
62847+:10C7F0000000000000000000000000000000000039
62848+:10C800000000000000000000000000000000000028
62849+:10C810000000000000000000000000000000000018
62850+:10C820000000000000000000000000000000000008
62851+:10C8300000000000000000000000000000000000F8
62852+:10C8400000000000000000000000000000000000E8
62853+:10C8500000000000000000000000000000000000D8
62854+:10C8600000000000000000000000000000000000C8
62855+:10C8700000000000000000000000000000000000B8
62856+:10C8800000000000000000000000000000000000A8
62857+:10C890000000000000000000000000000000000098
62858+:10C8A0000000000000000000000000000000000088
62859+:10C8B0000000000000000000000000000000000078
62860+:10C8C0000000000000000000000000000000000068
62861+:10C8D0000000000000000000000000000000000058
62862+:10C8E0000000000000000000000000000000000048
62863+:10C8F0000000000000000000000000000000000038
62864+:10C900000000000000000000000000000000000027
62865+:10C910000000000000000000000000000000000017
62866+:10C920000000000000000000000000000000000007
62867+:10C9300000000000000000000000000000000000F7
62868+:10C9400000000000000000000000000000000000E7
62869+:10C9500000000000000000000000000000000000D7
62870+:10C9600000000000000000000000000000000000C7
62871+:10C9700000000000000000000000000000000000B7
62872+:10C9800000000000000000000000000000000000A7
62873+:10C990000000000000000000000000000000000097
62874+:10C9A0000000000000000000000000000000000087
62875+:10C9B0000000000000000000000000000000000077
62876+:10C9C0000000000000000000000000000000000067
62877+:10C9D0000000000000000000000000000000000057
62878+:10C9E0000000000000000000000000000000000047
62879+:10C9F0000000000000000000000000000000000037
62880+:10CA00000000000000000000000000000000000026
62881+:10CA10000000000000000000000000000000000016
62882+:10CA20000000000000000000000000000000000006
62883+:10CA300000000000000000000000000000000000F6
62884+:10CA400000000000000000000000000000000000E6
62885+:10CA500000000000000000000000000000000000D6
62886+:10CA600000000000000000000000000000000000C6
62887+:10CA700000000000000000000000000000000000B6
62888+:10CA800000000000000000000000000000000000A6
62889+:10CA90000000000000000000000000000000000096
62890+:10CAA0000000000000000000000000000000000086
62891+:10CAB0000000000000000000000000000000000076
62892+:10CAC0000000000000000000000000000000000066
62893+:10CAD0000000000000000000000000000000000056
62894+:10CAE0000000000000000000000000000000000046
62895+:10CAF0000000000000000000000000000000000036
62896+:10CB00000000000000000000000000000000000025
62897+:10CB10000000000000000000000000000000000015
62898+:10CB20000000000000000000000000000000000005
62899+:10CB300000000000000000000000000000000000F5
62900+:10CB400000000000000000000000000000000000E5
62901+:10CB500000000000000000000000000000000000D5
62902+:10CB600000000000000000000000000000000000C5
62903+:10CB700000000000000000000000000000000000B5
62904+:10CB800000000000000000000000000000000000A5
62905+:10CB90000000000000000000000000000000000095
62906+:10CBA0000000000000000000000000000000000085
62907+:10CBB0000000000000000000000000000000000075
62908+:10CBC0000000000000000000000000000000000065
62909+:10CBD0000000000000000000000000000000000055
62910+:10CBE0000000000000000000000000000000000045
62911+:10CBF0000000000000000000000000000000000035
62912+:10CC00000000000000000000000000000000000024
62913+:10CC10000000000000000000000000000000000014
62914+:10CC20000000000000000000000000000000000004
62915+:10CC300000000000000000000000000000000000F4
62916+:10CC400000000000000000000000000000000000E4
62917+:10CC500000000000000000000000000000000000D4
62918+:10CC600000000000000000000000000000000000C4
62919+:10CC700000000000000000000000000000000000B4
62920+:10CC800000000000000000000000000000000000A4
62921+:10CC90000000000000000000000000000000000094
62922+:10CCA0000000000000000000000000000000000084
62923+:10CCB0000000000000000000000000000000000074
62924+:10CCC0000000000000000000000000000000000064
62925+:10CCD0000000000000000000000000000000000054
62926+:10CCE0000000000000000000000000000000000044
62927+:10CCF0000000000000000000000000000000000034
62928+:10CD00000000000000000000000000000000000023
62929+:10CD10000000000000000000000000000000000013
62930+:10CD20000000000000000000000000000000000003
62931+:10CD300000000000000000000000000000000000F3
62932+:10CD400000000000000000000000000000000000E3
62933+:10CD500000000000000000000000000000000000D3
62934+:10CD600000000000000000000000000000000000C3
62935+:10CD700000000000000000000000000000000000B3
62936+:10CD800000000000000000000000000000000000A3
62937+:10CD90000000000000000000000000000000000093
62938+:10CDA0000000000000000000000000000000000083
62939+:10CDB0000000000000000000000000000000000073
62940+:10CDC0000000000000000000000000000000000063
62941+:10CDD0000000000000000000000000000000000053
62942+:10CDE0000000000000000000000000000000000043
62943+:10CDF0000000000000000000000000000000000033
62944+:10CE00000000000000000000000000000000000022
62945+:10CE10000000000000000000000000000000000012
62946+:10CE20000000000000000000000000000000000002
62947+:10CE300000000000000000000000000000000000F2
62948+:10CE400000000000000000000000000000000000E2
62949+:10CE500000000000000000000000000000000000D2
62950+:10CE600000000000000000000000000000000000C2
62951+:10CE700000000000000000000000000000000000B2
62952+:10CE800000000000000000000000000000000000A2
62953+:10CE90000000000000000000000000000000000092
62954+:10CEA0000000000000000000000000000000000082
62955+:10CEB0000000000000000000000000000000000072
62956+:10CEC0000000000000000000000000000000000062
62957+:10CED0000000000000000000000000000000000052
62958+:10CEE0000000000000000000000000000000000042
62959+:10CEF0000000000000000000000000000000000032
62960+:10CF00000000000000000000000000000000000021
62961+:10CF10000000000000000000000000000000000011
62962+:10CF20000000000000000000000000000000000001
62963+:10CF300000000000000000000000000000000000F1
62964+:10CF400000000000000000000000000000000000E1
62965+:10CF500000000000000000000000000000000000D1
62966+:10CF600000000000000000000000000000000000C1
62967+:10CF700000000000000000000000000000000000B1
62968+:10CF800000000000000000000000000000000000A1
62969+:10CF90000000000000000000000000000000000091
62970+:10CFA0000000000000000000000000000000000081
62971+:10CFB0000000000000000000000000000000000071
62972+:10CFC0000000000000000000000000000000000061
62973+:10CFD0000000000000000000000000000000000051
62974+:10CFE0000000000000000000000000000000000041
62975+:10CFF0000000000000000000000000000000000031
62976+:10D000000000000000000000000000000000000020
62977+:10D010000000000000000000000000000000000010
62978+:10D020000000000000000000000000000000000000
62979+:10D0300000000000000000000000000000000000F0
62980+:10D0400000000000000000000000000000000000E0
62981+:10D0500000000000000000000000000000000000D0
62982+:10D0600000000000000000000000000000000000C0
62983+:10D0700000000000000000000000000000000000B0
62984+:10D0800000000000000000000000000000000000A0
62985+:10D090000000000000000000000000000000000090
62986+:10D0A0000000000000000000000000000000000080
62987+:10D0B0000000000000000000000000000000000070
62988+:10D0C0000000000000000000000000000000000060
62989+:10D0D0000000000000000000000000000000000050
62990+:10D0E0000000000000000000000000000000000040
62991+:10D0F0000000000000000000000000000000000030
62992+:10D10000000000000000000000000000000000001F
62993+:10D11000000000000000000000000000000000000F
62994+:10D1200000000000000000000000000000000000FF
62995+:10D1300000000000000000000000000000000000EF
62996+:10D1400000000000000000000000000000000000DF
62997+:10D1500000000000000000000000000000000000CF
62998+:10D1600000000000000000000000000000000000BF
62999+:10D1700000000000000000000000000000000000AF
63000+:10D18000000000000000000000000000000000009F
63001+:10D19000000000000000000000000000000000008F
63002+:10D1A000000000000000000000000000000000007F
63003+:10D1B000000000000000000000000000000000006F
63004+:10D1C000000000000000000000000000000000005F
63005+:10D1D000000000000000000000000000000000004F
63006+:10D1E000000000000000000000000000000000003F
63007+:10D1F000000000000000000000000000000000002F
63008+:10D20000000000000000000000000000000000001E
63009+:10D21000000000000000000000000000000000000E
63010+:10D2200000000000000000000000000000000000FE
63011+:10D2300000000000000000000000000000000000EE
63012+:10D2400000000000000000000000000000000000DE
63013+:10D2500000000000000000000000000000000000CE
63014+:10D2600000000000000000000000000000000000BE
63015+:10D2700000000000000000000000000000000000AE
63016+:10D28000000000000000000000000000000000009E
63017+:10D29000000000000000000000000000000000008E
63018+:10D2A000000000000000000000000000000000007E
63019+:10D2B000000000000000000000000000000000006E
63020+:10D2C000000000000000000000000000000000005E
63021+:10D2D000000000000000000000000000000000004E
63022+:10D2E000000000000000000000000000000000003E
63023+:10D2F000000000000000000000000000000000002E
63024+:10D30000000000000000000000000000000000001D
63025+:10D31000000000000000000000000000000000000D
63026+:10D3200000000000000000000000000000000000FD
63027+:10D3300000000000000000000000000000000000ED
63028+:10D3400000000000000000000000000000000000DD
63029+:10D3500000000000000000000000000000000000CD
63030+:10D3600000000000000000000000000000000000BD
63031+:10D3700000000000000000000000000000000000AD
63032+:10D38000000000000000000000000000000000009D
63033+:10D39000000000000000000000000000000000008D
63034+:10D3A000000000000000000000000000000000007D
63035+:10D3B000000000000000000000000000000000006D
63036+:10D3C000000000000000000000000000000000005D
63037+:10D3D000000000000000000000000000000000004D
63038+:10D3E000000000000000000000000000000000003D
63039+:10D3F000000000000000000000000000000000002D
63040+:10D40000000000000000000000000000000000001C
63041+:10D41000000000000000000000000000000000000C
63042+:10D4200000000000000000000000000000000000FC
63043+:10D4300000000000000000000000000000000000EC
63044+:10D4400000000000000000000000000000000000DC
63045+:10D4500000000000000000000000000000000000CC
63046+:10D4600000000000000000000000000000000000BC
63047+:10D4700000000000000000000000000000000000AC
63048+:10D48000000000000000000000000000000000009C
63049+:10D49000000000000000000000000000000000008C
63050+:10D4A000000000000000000000000000000000007C
63051+:10D4B000000000000000000000000000000000006C
63052+:10D4C000000000000000000000000000000000005C
63053+:10D4D000000000000000000000000000000000004C
63054+:10D4E000000000000000000000000000000000003C
63055+:10D4F000000000000000000000000000000000002C
63056+:10D50000000000000000000000000000000000001B
63057+:10D51000000000000000000000000000000000000B
63058+:10D5200000000000000000000000000000000000FB
63059+:10D5300000000000000000000000000000000000EB
63060+:10D5400000000000000000000000000000000000DB
63061+:10D5500000000000000000000000000000000000CB
63062+:10D5600000000000000000000000000000000000BB
63063+:10D5700000000000000000000000000000000000AB
63064+:10D58000000000000000000000000000000000009B
63065+:10D59000000000000000000000000000000000008B
63066+:10D5A000000000000000000000000000000000007B
63067+:10D5B000000000000000000000000000000000006B
63068+:10D5C000000000000000000000000000000000005B
63069+:10D5D000000000000000000000000000000000004B
63070+:10D5E000000000000000000000000000000000003B
63071+:10D5F000000000000000000000000000000000002B
63072+:10D60000000000000000000000000000000000001A
63073+:10D61000000000000000000000000000000000000A
63074+:10D6200000000000000000000000000000000000FA
63075+:10D6300000000000000000000000000000000000EA
63076+:10D6400000000000000000000000000000000000DA
63077+:10D6500000000000000000000000000000000000CA
63078+:10D6600000000000000000000000000000000000BA
63079+:10D6700000000000000000000000000000000000AA
63080+:10D68000000000000000000000000000000000009A
63081+:10D69000000000000000000000000000000000008A
63082+:10D6A000000000000000000000000000000000007A
63083+:10D6B000000000000000000000000000000000006A
63084+:10D6C000000000000000000000000000000000005A
63085+:10D6D000000000000000000000000000000000004A
63086+:10D6E000000000000000000000000000000000003A
63087+:10D6F000000000000000000000000000000000002A
63088+:10D700000000000000000000000000000000000019
63089+:10D710000000000000000000000000000000000009
63090+:10D7200000000000000000000000000000000000F9
63091+:10D7300000000000000000000000000000000000E9
63092+:10D7400000000000000000000000000000000000D9
63093+:10D7500000000000000000000000000000000000C9
63094+:10D7600000000000000000000000000000000000B9
63095+:10D7700000000000000000000000000000000000A9
63096+:10D780000000000000000000000000000000000099
63097+:10D790000000000000000000000000000000000089
63098+:10D7A0000000000000000000000000000000000079
63099+:10D7B0000000000000000000000000000000000069
63100+:10D7C0000000000000000000000000000000000059
63101+:10D7D0000000000000000000000000000000000049
63102+:10D7E0000000000000000000000000000000000039
63103+:10D7F0000000000000000000000000000000000029
63104+:10D800000000000000000000000000000000000018
63105+:10D810000000000000000000000000000000000008
63106+:10D8200000000000000000000000000000000000F8
63107+:10D8300000000000000000000000000000000000E8
63108+:10D8400000000000000000000000000000000000D8
63109+:10D8500000000000000000000000000000000000C8
63110+:10D8600000000000000000000000000000000000B8
63111+:10D8700000000000000000000000000000000000A8
63112+:10D880000000000000000000000000000000000098
63113+:10D890000000000000000000000000000000000088
63114+:10D8A0000000000000000000000000000000000078
63115+:10D8B0000000000000000000000000000000000068
63116+:10D8C0000000000000000000000000000000000058
63117+:10D8D0000000000000000000000000000000000048
63118+:10D8E0000000000000000000000000000000000038
63119+:10D8F0000000000000000000000000000000000028
63120+:10D900000000000000000000000000000000000017
63121+:10D910000000000000000000000000000000000007
63122+:10D9200000000000000000000000000000000000F7
63123+:10D9300000000000000000000000000000000000E7
63124+:10D9400000000000000000000000000000000000D7
63125+:10D9500000000000000000000000000000000000C7
63126+:10D9600000000000000000000000000000000000B7
63127+:10D9700000000000000000000000000000000000A7
63128+:10D980000000000000000000000000000000000097
63129+:10D990000000000000000000000000000000000087
63130+:10D9A0000000000000000000000000000000000077
63131+:10D9B0000000000000000000000000000000000067
63132+:10D9C0000000000000000000000000000000000057
63133+:10D9D0000000000000000000000000000000000047
63134+:10D9E0000000000000000000000000000000000037
63135+:10D9F0000000000000000000000000000000000027
63136+:10DA00000000000000000000000000000000000016
63137+:10DA10000000000000000000000000000000000006
63138+:10DA200000000000000000000000000000000000F6
63139+:10DA300000000000000000000000000000000000E6
63140+:10DA400000000000000000000000000000000000D6
63141+:10DA500000000000000000000000000000000000C6
63142+:10DA600000000000000000000000000000000000B6
63143+:10DA700000000000000000000000000000000000A6
63144+:10DA80000000000000000000000000000000000096
63145+:10DA90000000000000000000000000000000000086
63146+:10DAA0000000000000000000000000000000000076
63147+:10DAB0000000000000000000000000000000000066
63148+:10DAC0000000000000000000000000000000000056
63149+:10DAD0000000000000000000000000000000000046
63150+:10DAE0000000000000000000000000000000000036
63151+:10DAF0000000000000000000000000000000000026
63152+:10DB00000000000000000000000000000000000015
63153+:10DB10000000000000000000000000000000000005
63154+:10DB200000000000000000000000000000000000F5
63155+:10DB300000000000000000000000000000000000E5
63156+:10DB400000000000000000000000000000000000D5
63157+:10DB500000000000000000000000000000000000C5
63158+:10DB600000000000000000000000000000000000B5
63159+:10DB700000000000000000000000000000000000A5
63160+:10DB80000000000000000000000000000000000095
63161+:10DB90000000000000000000000000000000000085
63162+:10DBA0000000000000000000000000000000000075
63163+:10DBB0000000000000000000000000000000000065
63164+:10DBC0000000000000000000000000000000000055
63165+:10DBD0000000000000000000000000000000000045
63166+:10DBE0000000000000000000000000000000000035
63167+:10DBF0000000000000000000000000000000000025
63168+:10DC00000000000000000000000000000000000014
63169+:10DC10000000000000000000000000000000000004
63170+:10DC200000000000000000000000000000000000F4
63171+:10DC300000000000000000000000000000000000E4
63172+:10DC400000000000000000000000000000000000D4
63173+:10DC500000000000000000000000000000000000C4
63174+:10DC600000000000000000000000000000000000B4
63175+:10DC700000000000000000000000000000000000A4
63176+:10DC80000000000000000000000000000000000094
63177+:10DC90000000000000000000000000000000000084
63178+:10DCA0000000000000000000000000000000000074
63179+:10DCB0000000000000000000000000000000000064
63180+:10DCC0000000000000000000000000000000000054
63181+:10DCD0000000000000000000000000000000000044
63182+:10DCE0000000000000000000000000000000000034
63183+:10DCF0000000000000000000000000000000000024
63184+:10DD00000000000000000000000000000000000013
63185+:10DD10000000000000000000000000000000000003
63186+:10DD200000000000000000000000000000000000F3
63187+:10DD300000000000000000000000000000000000E3
63188+:10DD400000000000000000000000000000000000D3
63189+:10DD500000000000000000000000000000000000C3
63190+:10DD600000000000000000000000000000000000B3
63191+:10DD700000000000000000000000000000000000A3
63192+:10DD80000000000000000000000000000000000093
63193+:10DD90000000000000000000000000000000000083
63194+:10DDA0000000000000000000000000000000000073
63195+:10DDB0000000000000000000000000000000000063
63196+:10DDC0000000000000000000000000000000000053
63197+:10DDD0000000000000000000000000000000000043
63198+:10DDE0000000000000000000000000000000000033
63199+:10DDF0000000000000000000000000000000000023
63200+:10DE00000000000000000000000000000000000012
63201+:10DE10000000000000000000000000000000000002
63202+:10DE200000000000000000000000000000000000F2
63203+:10DE300000000000000000000000000000000000E2
63204+:10DE400000000000000000000000000000000000D2
63205+:10DE500000000000000000000000000000000000C2
63206+:10DE600000000000000000000000000000000000B2
63207+:10DE700000000000000000000000000000000000A2
63208+:10DE80000000000000000000000000000000000092
63209+:10DE90000000000000000000000000000000000082
63210+:10DEA0000000000000000000000000000000000072
63211+:10DEB0000000000000000000000000000000000062
63212+:10DEC0000000000000000000000000000000000052
63213+:10DED0000000000000000000000000000000000042
63214+:10DEE0000000000000000000000000000000000032
63215+:10DEF0000000000000000000000000000000000022
63216+:10DF00000000000000000000000000000000000011
63217+:10DF10000000000000000000000000000000000001
63218+:10DF200000000000000000000000000000000000F1
63219+:10DF300000000000000000000000000000000000E1
63220+:10DF400000000000000000000000000000000000D1
63221+:10DF500000000000000000000000000000000000C1
63222+:10DF600000000000000000000000000000000000B1
63223+:10DF700000000000000000000000000000000000A1
63224+:10DF80000000000000000000000000000000000091
63225+:10DF90000000000000000000000000000000000081
63226+:10DFA0000000000000000000000000000000000071
63227+:10DFB0000000000000000000000000000000000061
63228+:10DFC0000000000000000000000000000000000051
63229+:10DFD0000000000000000000000000000000000041
63230+:10DFE0000000000000000000000000000000000031
63231+:10DFF0000000000000000000000000000000000021
63232+:10E000000000000000000000000000000000000010
63233+:10E010000000000000000000000000000000000000
63234+:10E0200000000000000000000000000000000000F0
63235+:10E0300000000000000000000000000000000000E0
63236+:10E0400000000000000000000000000000000000D0
63237+:10E0500000000000000000000000000000000000C0
63238+:10E0600000000000000000000000000000000000B0
63239+:10E0700000000000000000000000000000000000A0
63240+:10E080000000000000000000000000000000000090
63241+:10E090000000000000000000000000000000000080
63242+:10E0A0000000000000000000000000000000000070
63243+:10E0B0000000000000000000000000000000000060
63244+:10E0C0000000000000000000000000000000000050
63245+:10E0D0000000000000000000000000000000000040
63246+:10E0E0000000000000000000000000000000000030
63247+:10E0F0000000000000000000000000000000000020
63248+:10E10000000000000000000000000000000000000F
63249+:10E1100000000000000000000000000000000000FF
63250+:10E1200000000000000000000000000000000000EF
63251+:10E1300000000000000000000000000000000000DF
63252+:10E1400000000000000000000000000000000000CF
63253+:10E1500000000000000000000000000000000000BF
63254+:10E1600000000000000000000000000000000000AF
63255+:10E17000000000000000000000000000000000009F
63256+:10E18000000000000000000000000000000000008F
63257+:10E19000000000000000000000000000000000007F
63258+:10E1A000000000000000000000000000000000006F
63259+:10E1B000000000000000000000000000000000005F
63260+:10E1C000000000000000000000000000000000004F
63261+:10E1D000000000000000000000000000000000003F
63262+:10E1E000000000000000000000000000000000002F
63263+:10E1F000000000000000000000000000000000809F
63264+:10E20000000000000000000000000000000000000E
63265+:10E2100000000000000000000000000000000000FE
63266+:10E220000000000A000000000000000000000000E4
63267+:10E2300010000003000000000000000D0000000DB1
63268+:10E240003C020801244295C03C030801246397FC6A
63269+:10E25000AC4000000043202B1480FFFD244200044A
63270+:10E260003C1D080037BD9FFC03A0F0213C100800B6
63271+:10E27000261032103C1C0801279C95C00E0012BECF
63272+:10E28000000000000000000D3C02800030A5FFFFF0
63273+:10E2900030C600FF344301803C0880008D0901B87E
63274+:10E2A0000520FFFE00000000AC6400002404000212
63275+:10E2B000A4650008A066000AA064000BAC67001803
63276+:10E2C0003C03100003E00008AD0301B83C0560000A
63277+:10E2D0008CA24FF80440FFFE00000000ACA44FC029
63278+:10E2E0003C0310003C040200ACA44FC403E000084F
63279+:10E2F000ACA34FF89486000C00A050212488001491
63280+:10E3000000062B0200051080004448210109182B4B
63281+:10E310001060001100000000910300002C6400094F
63282+:10E320005080000991190001000360803C0D080134
63283+:10E3300025AD9258018D58218D67000000E000083E
63284+:10E340000000000091190001011940210109302B42
63285+:10E3500054C0FFF29103000003E000080000102108
63286+:10E360000A000CCC25080001910F0001240E000AC0
63287+:10E3700015EE00400128C8232F38000A1700003D81
63288+:10E38000250D00028D580000250F0006370E0100F4
63289+:10E39000AD4E0000910C000291AB000191A400026F
63290+:10E3A00091A60003000C2E00000B3C0000A71025D6
63291+:10E3B00000041A000043C8250326C025AD580004F8
63292+:10E3C000910E000691ED000191E7000291E5000336
63293+:10E3D000000E5E00000D6400016C30250007220075
63294+:10E3E00000C41025004518252508000A0A000CCC99
63295+:10E3F000AD430008910F000125040002240800022B
63296+:10E4000055E80001012020210A000CCC00804021A9
63297+:10E41000910C0001240B0003158B00160000000076
63298+:10E420008D580000910E000225080003370D0008EA
63299+:10E43000A14E00100A000CCCAD4D00009119000156
63300+:10E44000240F0004172F000B0000000091070002AA
63301+:10E45000910400038D43000000072A0000A410254A
63302+:10E460003466000425080004AD42000C0A000CCC00
63303+:10E47000AD46000003E000082402000127BDFFE8CC
63304+:10E48000AFBF0014AFB000100E00164E0080802108
63305+:10E490003C0480083485008090A600052403FFFE1C
63306+:10E4A0000200202100C310248FBF00148FB0001081
63307+:10E4B000A0A200050A00165827BD001827BDFFE8D6
63308+:10E4C000AFB00010AFBF00140E000FD40080802149
63309+:10E4D0003C06800834C5008090A40000240200504F
63310+:10E4E000308300FF106200073C09800002002021F9
63311+:10E4F0008FBF00148FB00010AD2001800A00108F74
63312+:10E5000027BD0018240801003C07800002002021DC
63313+:10E510008FBF00148FB00010ACE801800A00108F8C
63314+:10E5200027BD001827BDFF783C058008AFBE0080DE
63315+:10E53000AFB7007CAFB3006CAFB10064AFBF008475
63316+:10E54000AFB60078AFB50074AFB40070AFB200687A
63317+:10E55000AFB0006034A600803C0580008CB201287A
63318+:10E5600090C400098CA701043C020001309100FF17
63319+:10E5700000E218240000B8210000F021106000071C
63320+:10E58000000098213C0908008D2931F02413000176
63321+:10E59000252800013C010800AC2831F0ACA0008423
63322+:10E5A00090CC0005000C5827316A0001154000721C
63323+:10E5B000AFA0005090CD00002406002031A400FF41
63324+:10E5C00010860018240E0050108E009300000000EA
63325+:10E5D0003C1008008E1000DC260F00013C010800F2
63326+:10E5E000AC2F00DC0E0016C7000000000040182110
63327+:10E5F0008FBF00848FBE00808FB7007C8FB60078FD
63328+:10E600008FB500748FB400708FB3006C8FB2006848
63329+:10E610008FB100648FB000600060102103E000083B
63330+:10E6200027BD00880000000D3C1F8000AFA0003017
63331+:10E6300097E501168FE201043C04002030B9FFFF8A
63332+:10E64000004438240007182B00033140AFA60030E7
63333+:10E650008FF5010437F80C003C1600400338802188
63334+:10E6600002B6A02434C40040128000479215000D69
63335+:10E6700032A800201500000234860080008030217E
63336+:10E6800014C0009FAFA600303C0D800835A6008066
63337+:10E6900090CC0008318B0040516000063C06800899
63338+:10E6A000240E0004122E00A8240F0012122F003294
63339+:10E6B0003C06800834C401003C0280009447011AE3
63340+:10E6C0009619000E909F00088E18000830E3FFFF97
63341+:10E6D00003F9B00432B40004AFB6005CAFA3005835
63342+:10E6E0008E1600041280002EAFB8005434C3008090
63343+:10E6F000906800083105004014A0002500000000CB
63344+:10E700008C70005002D090230640000500000000ED
63345+:10E710008C71003402D1A82306A201678EE20008A2
63346+:10E72000126000063C1280003C1508008EB531F4E2
63347+:10E7300026B600013C010800AC3631F4AE4000447E
63348+:10E74000240300018FBF00848FBE00808FB7007C40
63349+:10E750008FB600788FB500748FB400708FB3006CE3
63350+:10E760008FB200688FB100648FB00060006010212C
63351+:10E7700003E0000827BD00880E000D2800002021BE
63352+:10E780000A000D75004018210A000D9500C02021D7
63353+:10E790000E00171702C020211440FFE10000000006
63354+:10E7A0003C0B8008356400808C8A003402CA482300
63355+:10E7B0000520001D000000003C1E08008FDE310017
63356+:10E7C00027D700013C010800AC3731001260000679
63357+:10E7D000024020213C1408008E9431F42690000160
63358+:10E7E0003C010800AC3031F40E00164E3C1E80088F
63359+:10E7F00037CD008091B700250240202136EE00047D
63360+:10E800000E001658A1AE00250E000CAC02402021CF
63361+:10E810000A000DCA240300013C17080126F796C020
63362+:10E820000A000D843C1F80008C86003002C66023E5
63363+:10E830001980000C2419000C908F004F3C14080024
63364+:10E840008E94310032B500FC35ED0001268E0001BA
63365+:10E850003C010800AC2E3100A08D004FAFA0005845
63366+:10E860002419000CAFB900308C9800300316A02397
63367+:10E870001A80010B8FA300580074F82A17E0FFD309
63368+:10E88000000000001074002A8FA5005802D4B021A7
63369+:10E8900000B410233044FFFFAFA4005832A8000298
63370+:10E8A0001100002E32AB00103C15800836B00080FD
63371+:10E8B0009216000832D30040526000FB8EE200083E
63372+:10E8C0000E00164E02402021240A0018A20A000958
63373+:10E8D000921100052409FFFE024020210229902404
63374+:10E8E0000E001658A2120005240400390000282149
63375+:10E8F0000E0016F2240600180A000DCA24030001B7
63376+:10E9000092FE000C3C0A800835490080001EBB00C6
63377+:10E910008D27003836F10081024020213225F08118
63378+:10E920000E000C9B30C600FF0A000DC10000000065
63379+:10E930003AA7000130E300011460FFA402D4B02123
63380+:10E940000A000E1D00000000024020210E001734B6
63381+:10E95000020028210A000D75004018211160FF7087
63382+:10E960003C0F80083C0D800835EE00808DC40038D7
63383+:10E970008FA300548DA60004006660231D80FF68ED
63384+:10E98000000000000064C02307020001AFA400548F
63385+:10E990003C1F08008FFF31E433F9000113200015FC
63386+:10E9A0008FAC00583C07800094E3011A10600012FD
63387+:10E9B0003C0680080E00216A024020213C03080129
63388+:10E9C000906396F13064000214800145000000005D
63389+:10E9D000306C0004118000078FAC0058306600FBDB
63390+:10E9E0003C010801A02696F132B500FCAFA000580A
63391+:10E9F0008FAC00583C06800834D30080AFB40018B8
63392+:10EA0000AFB60010AFAC00143C088000950B01209D
63393+:10EA10008E6F0030966A005C8FA3005C8FBF003061
63394+:10EA20003169FFFF3144FFFF8FAE005401341021E4
63395+:10EA3000350540000064382B0045C82103E7C02598
63396+:10EA4000AFB90020AFAF0028AFB80030AFAF00249F
63397+:10EA5000AFA0002CAFAE0034926D000831B40008B6
63398+:10EA6000168000BB020020218EE200040040F8095D
63399+:10EA700027A400108FAF003031F300025660000170
63400+:10EA800032B500FE3C048008349F008093F90008F2
63401+:10EA900033380040530000138FA400248C850004F9
63402+:10EAA0008FA7005410A700D52404001432B0000131
63403+:10EAB0001200000C8FA400242414000C1234011A3C
63404+:10EAC0002A2D000D11A001022413000E240E000AAD
63405+:10EAD000522E0001241E00088FAF002425E40001FF
63406+:10EAE000AFA400248FAA00143C0B80083565008079
63407+:10EAF000008A48218CB10030ACA9003090A4004EAF
63408+:10EB00008CA700303408FFFF0088180400E3F821C8
63409+:10EB1000ACBF00348FA600308FB900548FB8005CB2
63410+:10EB200030C200081040000B033898218CAC002044
63411+:10EB3000119300D330C600FF92EE000C8FA7003473
63412+:10EB400002402021000E6B0035B400800E000C9BAB
63413+:10EB50003285F0803C028008345000808E0F0030F7
63414+:10EB600001F1302318C00097264800803C070800B8
63415+:10EB70008CE731E42404FF80010418243118007F5D
63416+:10EB80003C1F80003C19800430F10001AFE300908D
63417+:10EB900012200006031928213C030801906396F116
63418+:10EBA00030690008152000C6306A00F73C10800864
63419+:10EBB00036040080908C004F318B000115600042BC
63420+:10EBC000000000003C0608008CC6319830CE0010D2
63421+:10EBD00051C0004230F9000190AF006B55E0003F9A
63422+:10EBE00030F9000124180001A0B8006B3C1180002E
63423+:10EBF0009622007A24470064A48700123C0D800806
63424+:10EC000035A5008090B40008329000401600000442
63425+:10EC10003C03800832AE000115C0008B00000000EC
63426+:10EC2000346400808C86002010D3000A3463010015
63427+:10EC30008C67000002C7782319E000978FBF00544B
63428+:10EC4000AC93002024130001AC760000AFB3005059
63429+:10EC5000AC7F000417C0004E000000008FA90050D8
63430+:10EC60001520000B000000003C030801906396F1A2
63431+:10EC7000306A00011140002E8FAB0058306400FE56
63432+:10EC80003C010801A02496F10A000D75000018212E
63433+:10EC90000E000CAC024020210A000F1300000000FF
63434+:10ECA0000A000E200000A0210040F80924040017EB
63435+:10ECB0000A000DCA240300010040F80924040016CC
63436+:10ECC0000A000DCA240300019094004F240DFFFE9A
63437+:10ECD000028D2824A085004F30F900011320000682
63438+:10ECE0003C0480083C030801906396F1307F0010DB
63439+:10ECF00017E00051306800EF34900080240A0001D2
63440+:10ED0000024020210E00164EA60A00129203002592
63441+:10ED100024090001AFA90050346200010240202103
63442+:10ED20000E001658A20200250A000EF93C0D8008BC
63443+:10ED30001160FE83000018218FA5003030AC000464
63444+:10ED40001180FE2C8FBF00840A000DCB240300012C
63445+:10ED500027A500380E000CB6AFA000385440FF4382
63446+:10ED60008EE200048FB40038329001005200FF3F61
63447+:10ED70008EE200048FA3003C8E6E0058006E682364
63448+:10ED800005A3FF39AE6300580A000E948EE200041A
63449+:10ED90000E00164E024020213C038008346800809B
63450+:10EDA000024020210E001658A11E000903C0302188
63451+:10EDB000240400370E0016F2000028210A000F116B
63452+:10EDC0008FA900508FAB00185960FF8D3C0D800853
63453+:10EDD0000E00164E02402021920C00252405000151
63454+:10EDE000AFA5005035820004024020210E001658C5
63455+:10EDF000A20200250A000EF93C0D800812240059D9
63456+:10EE00002A2300151060004D240900162408000C68
63457+:10EE10005628FF2732B000013C0A8008914C001BA5
63458+:10EE20002406FFBD241E000E01865824A14B001BA2
63459+:10EE30000A000EA532B000013C010801A02896F19D
63460+:10EE40000A000EF93C0D80088CB500308EFE0008DB
63461+:10EE50002404001826B6000103C0F809ACB600303F
63462+:10EE60003C030801906396F13077000116E0FF81C2
63463+:10EE7000306A00018FB200300A000D753243000481
63464+:10EE80003C1080009605011A50A0FF2B34C60010DC
63465+:10EE90000A000EC892EE000C8C6200001456FF6D42
63466+:10EEA000000000008C7800048FB9005403388823D8
63467+:10EEB0000621FF638FBF00540A000F0E0000000000
63468+:10EEC0003C010801A02A96F10A000F3030F9000138
63469+:10EED0001633FF028FAF00240A000EB0241E00106C
63470+:10EEE0000E00164E024020213C0B80083568008041
63471+:10EEF00091090025240A0001AFAA0050353300040F
63472+:10EF0000024020210E001658A11300253C050801DF
63473+:10EF100090A596F130A200FD3C010801A02296F1D7
63474+:10EF20000A000E6D004018212411000E53D1FEEA94
63475+:10EF3000241E00100A000EAF241E00165629FEDC07
63476+:10EF400032B000013C0A8008914C001B2406FFBD32
63477+:10EF5000241E001001865824A14B001B0A000EA598
63478+:10EF600032B000010A000EA4241E00123C038000EF
63479+:10EF70008C6201B80440FFFE24040800AC6401B8B0
63480+:10EF800003E000080000000030A5FFFF30C6FFFFCF
63481+:10EF90003C0780008CE201B80440FFFE34EA0180A7
63482+:10EFA000AD440000ACE400203C0480089483004899
63483+:10EFB0003068FFFF11000016AF88000824AB001274
63484+:10EFC000010B482B512000133C04800034EF01005A
63485+:10EFD00095EE00208F890000240D001A31CCFFFF30
63486+:10EFE00031274000A14D000B10E000362583FFFEC5
63487+:10EFF0000103C02B170000348F9900048F88000490
63488+:10F00000A5430014350700010A001003AF87000470
63489+:10F010003C04800024030003348201808F890000B7
63490+:10F020008F870004A043000B3C088000350C018052
63491+:10F03000A585000EA585001A8F85000C30EB800099
63492+:10F04000A5890010AD850028A58600081160000F75
63493+:10F050008F85001435190100972A00163158FFFCDE
63494+:10F06000270F000401E870218DCD400031A6FFFF7D
63495+:10F0700014C000072403BFFF3C02FFFF34487FFF9A
63496+:10F0800000E83824AF8700048F8500142403BFFFF5
63497+:10F090003C04800000E3582434830180A46B0026E4
63498+:10F0A000AC69002C10A0000300054C02A465001000
63499+:10F0B000A46900263C071000AC8701B803E00008F3
63500+:10F0C000000000008F990004240AFFFE032A382460
63501+:10F0D0000A001003AF87000427BDFFE88FA20028B5
63502+:10F0E00030A5FFFF30C6FFFFAFBF0010AF87000C99
63503+:10F0F000AF820014AF8000040E000FDBAF80000071
63504+:10F100008FBF001027BD001803E00008AF80001477
63505+:10F110003C06800034C4007034C701008C8A0000B3
63506+:10F1200090E500128F84000027BDFFF030A300FFA0
63507+:10F13000000318823082400010400037246500032D
63508+:10F140000005C8800326C0218F0E4000246F0004F4
63509+:10F15000000F6880AFAE000001A660218D8B4000DB
63510+:10F16000AFAB000494E900163128FFFC01063821FA
63511+:10F170008CE64000AFA600088FA9000800003021EF
63512+:10F18000000028213C07080024E701000A0010675E
63513+:10F19000240800089059000024A500012CAC000CA4
63514+:10F1A0000079C0210018788001E770218DCD000022
63515+:10F1B0001180000600CD302603A5102114A8FFF50C
63516+:10F1C00000051A005520FFF4905900003C0480000F
63517+:10F1D000348700703C0508008CA531048CE30000E6
63518+:10F1E0002CA2002010400009006A38230005488046
63519+:10F1F0003C0B0800256B3108012B402124AA00019B
63520+:10F20000AD0700003C010800AC2A310400C0102109
63521+:10F2100003E0000827BD0010308220001040000BE2
63522+:10F2200000055880016648218D24400024680004B0
63523+:10F2300000083880AFA4000000E618218C6540006B
63524+:10F24000AFA000080A001057AFA500040000000D91
63525+:10F250000A0010588FA9000827BDFFE03C07800076
63526+:10F2600034E60100AFBF001CAFB20018AFB100140C
63527+:10F27000AFB0001094C5000E8F87000030A4FFFFD0
63528+:10F280002483000430E2400010400010AF830028C7
63529+:10F290003C09002000E940241100000D30EC800002
63530+:10F2A0008F8A0004240BBFFF00EB38243543100085
63531+:10F2B000AF87000030F220001640000B3C1900041C
63532+:10F2C000241FFFBF0A0010B7007F102430EC80001D
63533+:10F2D000158000423C0E002030F220001240FFF862
63534+:10F2E0008F8300043C19000400F9C0241300FFF5CB
63535+:10F2F000241FFFBF34620040AF82000430E20100EF
63536+:10F300001040001130F010008F83002C10600006B8
63537+:10F310003C0F80003C05002000E52024148000C044
63538+:10F320003C0800043C0F800035EE010095CD001E26
63539+:10F3300095CC001C31AAFFFF000C5C00014B482556
63540+:10F34000AF89000C30F010001200000824110001F9
63541+:10F3500030F100201620008B3C18100000F890249B
63542+:10F36000164000823C040C002411000130E801002A
63543+:10F370001500000B3C0900018F85000430A94000F6
63544+:10F38000152000073C0900013C0C1F0100EC58242B
63545+:10F390003C0A1000116A01183C1080003C09000171
63546+:10F3A00000E9302410C000173C0B10003C18080086
63547+:10F3B0008F1800243307000214E0014024030001E9
63548+:10F3C0008FBF001C8FB200188FB100148FB00010D7
63549+:10F3D0000060102103E0000827BD002000EE682433
63550+:10F3E00011A0FFBE30F220008F8F00043C11FFFF00
63551+:10F3F00036307FFF00F0382435E380000A0010A685
63552+:10F40000AF87000000EB102450400065AF8000245F
63553+:10F410008F8C002C3C0D0F0000ED18241580008807
63554+:10F42000AF83001030E8010011000086938F0010B8
63555+:10F430003C0A0200106A00833C1280003650010032
63556+:10F44000920500139789002A3626000230AF00FF8C
63557+:10F4500025EE0004000E19C03C0480008C9801B811
63558+:10F460000700FFFE34880180AD0300003C198008CE
63559+:10F47000AC830020973100483225FFFF10A0015CCB
63560+:10F48000AF8500082523001200A3F82B53E0015993
63561+:10F490008F850004348D010095AC00202402001AF1
63562+:10F4A00030E44000318BFFFFA102000B108001927D
63563+:10F4B0002563FFFE00A3502B154001908F8F0004A1
63564+:10F4C000A50300148F88000435050001AF850004F2
63565+:10F4D0003C08800035190180A729000EA729001AD1
63566+:10F4E0008F89000C30B18000A7270010AF290028B9
63567+:10F4F000A72600081220000E3C04800035020100FF
63568+:10F50000944C0016318BFFFC256400040088182100
63569+:10F510008C7F400033E6FFFF14C000053C048000F0
63570+:10F520003C0AFFFF354D7FFF00AD2824AF85000466
63571+:10F53000240EBFFF00AE402434850180A4A800261D
63572+:10F54000ACA7002C3C071000AC8701B800001821C4
63573+:10F550008FBF001C8FB200188FB100148FB0001045
63574+:10F560000060102103E0000827BD00203C020BFFD3
63575+:10F5700000E41824345FFFFF03E3C82B5320FF7B14
63576+:10F58000241100013C0608008CC6002C24C5000193
63577+:10F590003C010800AC25002C0A0010D42411000501
63578+:10F5A0008F85002410A0002FAF80001090A30000D2
63579+:10F5B000146000792419000310A0002A30E601002D
63580+:10F5C00010C000CC8F860010241F000210DF00C97D
63581+:10F5D0008F8B000C3C0708008CE7003824E4FFFF09
63582+:10F5E00014E0000201641824000018213C0D0800FA
63583+:10F5F00025AD0038006D1021904C00048F85002847
63584+:10F6000025830004000321C030A5FFFF3626000239
63585+:10F610000E000FDB000000000A00114D0000182151
63586+:10F6200000E8302414C0FF403C0F80000E00103D65
63587+:10F63000000000008F8700000A0010CAAF82000C93
63588+:10F64000938F00103C18080127189640000F90C0B7
63589+:10F6500002588021AF9000248F85002414A0FFD38E
63590+:10F66000AF8F00103C0480008C86400030C5010044
63591+:10F6700010A000BC322300043C0C08008D8C002438
63592+:10F6800024120004106000C23190000D3C04800080
63593+:10F690008C8D40003402FFFF11A201003231FFFBCC
63594+:10F6A0008C884000310A01005540000124110010EF
63595+:10F6B00030EE080011C000BE2419FFFB8F9800280F
63596+:10F6C0002F0F03EF51E000010219802430E90100FF
63597+:10F6D00011200014320800018F87002C14E000FB79
63598+:10F6E0008F8C000C3C05800034AB0100917F00132F
63599+:10F6F00033E300FF246A00042403FFFE0203802496
63600+:10F70000000A21C012000002023230253226FFFF1B
63601+:10F710000E000FDB9785002A1200FF290000182138
63602+:10F72000320800011100000D32180004240E0001FF
63603+:10F73000120E0002023230253226FFFF9785002A82
63604+:10F740000E000FDB00002021240FFFFE020F80249B
63605+:10F750001200FF1B00001821321800045300FF188C
63606+:10F760002403000102323025241200045612000145
63607+:10F770003226FFFF9785002A0E000FDB24040100CC
63608+:10F780002419FFFB021988241220FF0D0000182104
63609+:10F790000A0010E9240300011079009C00003021C8
63610+:10F7A00090AD00012402000211A200BE30EA004028
63611+:10F7B00090B90001241800011338007F30E900409F
63612+:10F7C0008CA600049785002A00C020210E000FDBC4
63613+:10F7D0003626000200004021010018218FBF001CC6
63614+:10F7E0008FB200188FB100148FB00010006010218C
63615+:10F7F00003E0000827BD0020360F010095EE000C45
63616+:10F8000031CD020015A0FEE63C0900013C1880083D
63617+:10F81000971200489789002A362600023248FFFFD7
63618+:10F82000AF8800083C0380008C7101B80620FFFE01
63619+:10F83000346A0180AD4000001100008E3C0F800052
63620+:10F84000253F0012011FC82B1320008B240E00033C
63621+:10F85000346C0100958B00202402001A30E4400033
63622+:10F860003163FFFFA142000B108000A72463FFFE5D
63623+:10F870000103682B15A000A52408FFFE34A5000194
63624+:10F88000A5430014AF8500043C0480002412BFFF90
63625+:10F8900000B2802434850180A4A9000EA4A9001A16
63626+:10F8A000A4A60008A4B00026A4A700103C071000DE
63627+:10F8B000AC8701B80A00114D000018213C038000FC
63628+:10F8C00034640100949F000E3C1908008F3900D861
63629+:10F8D0002404008033E5FFFF273100013C010800CC
63630+:10F8E000AC3100D80E000FDB240600030A00114DD6
63631+:10F8F00000001821240A000210CA00598F85002830
63632+:10F900003C0308008C6300D0240E0001106E005EE2
63633+:10F910002CCF000C24D2FFFC2E5000041600002136
63634+:10F9200000002021241800021078001B2CD9000CA4
63635+:10F9300024DFFFF82FE900041520FF330000202109
63636+:10F9400030EB020051600004000621C054C00022C8
63637+:10F9500030A5FFFF000621C030A5FFFF0A00117D82
63638+:10F96000362600023C0908008D29002431300001B0
63639+:10F970005200FEF7000018219785002A3626000263
63640+:10F980000E000FDB000020210A00114D000018219D
63641+:10F990000A00119C241200021320FFE624DFFFF866
63642+:10F9A0000000202130A5FFFF0A00117D362600024D
63643+:10F9B0000A0011AC021980245120FF828CA6000499
63644+:10F9C0003C05080190A5964110A0FF7E2408000187
63645+:10F9D0000A0011F0010018210E000FDB3226000191
63646+:10F9E0008F8600108F8500280A00124F000621C064
63647+:10F9F0008F8500043C18800024120003371001801A
63648+:10FA0000A212000B0A00112E3C08800090A30001F6
63649+:10FA1000241100011071FF70240800012409000264
63650+:10FA20005069000430E60040240800010A0011F08B
63651+:10FA30000100182150C0FFFD240800013C0C80008B
63652+:10FA4000358B01009563001094A40002307FFFFF06
63653+:10FA5000509FFF62010018210A001284240800014F
63654+:10FA60002CA803EF1100FE56240300010A001239EE
63655+:10FA700000000000240E000335EA0180A14E000BB7
63656+:10FA80000A00121C3C04800011E0FFA2000621C005
63657+:10FA900030A5FFFF0A00117D362600020A0011A5DD
63658+:10FAA000241100201140FFC63C1280003650010096
63659+:10FAB000960F001094AE000231E80FFF15C8FFC08A
63660+:10FAC000000000000A0011E690B900013C060800A1
63661+:10FAD0008CC6003824C4FFFF14C00002018418241F
63662+:10FAE000000018213C0D080025AD0038006D1021E4
63663+:10FAF0000A0011B6904300048F8F0004240EFFFE0D
63664+:10FB00000A00112C01EE28242408FFFE0A00121A14
63665+:10FB100000A8282427BDFFC8AFB00010AFBF003435
63666+:10FB20003C10600CAFBE0030AFB7002CAFB6002861
63667+:10FB3000AFB50024AFB40020AFB3001CAFB20018C3
63668+:10FB4000AFB100148E0E5000240FFF7F3C068000E2
63669+:10FB500001CF682435AC380C240B0003AE0C5000E8
63670+:10FB6000ACCB00083C010800AC2000200E001819A6
63671+:10FB7000000000003C0A0010354980513C06601628
63672+:10FB8000AE09537C8CC700003C0860148D0500A0B2
63673+:10FB90003C03FFFF00E320243C02535300051FC237
63674+:10FBA0001482000634C57C000003A08002869821E0
63675+:10FBB0008E7200043C116000025128218CBF007C31
63676+:10FBC0008CA200783C1E600037C420203C05080150
63677+:10FBD00024A59288AF820018AF9F001C0E0016DD8E
63678+:10FBE0002406000A3C190001273996403C01080010
63679+:10FBF000AC3931DC0E0020DDAF8000148FD708084F
63680+:10FC00002418FFF03C15570902F8B02412D502F56C
63681+:10FC100024040001AF80002C3C1480003697018042
63682+:10FC20003C1E080127DE9644369301008E900000AA
63683+:10FC30003205000310A0FFFD3207000110E000882C
63684+:10FC4000320600028E7100283C048000AE91002034
63685+:10FC50008E6500048E66000000A0382100C040219F
63686+:10FC60008C8301B80460FFFE3C0B0010240A0800DE
63687+:10FC700000AB4824AC8A01B8552000E0240BBFFF3C
63688+:10FC80009675000E3C1208008E52002030AC4000E9
63689+:10FC900032AFFFFF264E000125ED00043C010800B5
63690+:10FCA000AC2E0020118000E8AF8D00283C18002009
63691+:10FCB00000B8B02412C000E530B980002408BFFFAE
63692+:10FCC00000A8382434C81000AF87000030E62000B8
63693+:10FCD00010C000E92409FFBF3C03000400E328240E
63694+:10FCE00010A00002010910243502004030EA010092
63695+:10FCF00011400010AF8200048F8B002C11600007B0
63696+:10FD00003C0D002000ED6024118000043C0F000435
63697+:10FD100000EF702411C00239000000009668001E38
63698+:10FD20009678001C3115FFFF0018B40002B690252C
63699+:10FD3000AF92000C30F910001320001324150001BD
63700+:10FD400030FF002017E0000A3C04100000E41024FB
63701+:10FD50001040000D3C0A0C003C090BFF00EA18247F
63702+:10FD60003525FFFF00A3302B10C0000830ED010047
63703+:10FD70003C0C08008D8C002C24150005258B0001FF
63704+:10FD80003C010800AC2B002C30ED010015A0000B4D
63705+:10FD90003C0500018F85000430AE400055C00007CF
63706+:10FDA0003C0500013C161F0100F690243C0F10009A
63707+:10FDB000124F01CE000000003C05000100E5302498
63708+:10FDC00010C000AF3C0C10003C1F08008FFF002447
63709+:10FDD00033E90002152000712403000100601021A6
63710+:10FDE000104000083C0680003C08800035180100E7
63711+:10FDF0008F0F00243C056020ACAF00140000000011
63712+:10FE00003C0680003C194000ACD9013800000000DD
63713+:10FE10005220001332060002262B0140262C0080BF
63714+:10FE2000240EFF80016E2024018E6824000D1940ED
63715+:10FE3000318A007F0004A9403172007F3C16200007
63716+:10FE400036C20002006A482502B2382500E2882541
63717+:10FE50000122F825ACDF0830ACD1083032060002B0
63718+:10FE600010C0FF723C188000370501408CA80000CC
63719+:10FE700024100040AF08002090AF000831E300706C
63720+:10FE8000107000D428790041532000082405006038
63721+:10FE9000241100201071000E3C0A40003C09800033
63722+:10FEA000AD2A01780A001304000000001465FFFB6E
63723+:10FEB0003C0A40000E001FFA000000003C0A40000F
63724+:10FEC0003C098000AD2A01780A00130400000000FC
63725+:10FED00090A90009241F00048CA70000312800FF0E
63726+:10FEE000111F01B22503FFFA2C7200061240001404
63727+:10FEF0003C0680008CA9000494A4000A310500FF90
63728+:10FF000000095E022D6A00083086FFFF15400002DE
63729+:10FF10002567000424070003240C000910AC01FA33
63730+:10FF200028AD000A11A001DE2410000A240E0008EA
63731+:10FF300010AE0028000731C000C038213C06800008
63732+:10FF40008CD501B806A0FFFE34D20180AE47000078
63733+:10FF500034CB0140916E0008240300023C0A4000AB
63734+:10FF600031C400FF00046A0001A86025A64C000807
63735+:10FF7000A243000B9562000A3C0810003C09800077
63736+:10FF8000A64200108D670004AE470024ACC801B83B
63737+:10FF9000AD2A01780A001304000000003C0A80002A
63738+:10FFA000354401009483000E3C0208008C4200D8C6
63739+:10FFB000240400803065FFFF245500013C01080047
63740+:10FFC000AC3500D80E000FDB240600030A001370C6
63741+:10FFD000000018210009320230D900FF2418000166
63742+:10FFE0001738FFD5000731C08F910020262200016D
63743+:10FFF000AF8200200A0013C800C0382100CB2024A3
63744+:020000021000EC
63745+:10000000AF85000010800008AF860004240D87FF34
63746+:1000100000CD6024158000083C0E006000AE302446
63747+:1000200010C00005000000000E000D42000000009E
63748+:100030000A001371000000000E0016050000000009
63749+:100040000A0013710000000030B980005320FF1F28
63750+:10005000AF8500003C02002000A2F82453E0FF1B03
63751+:10006000AF8500003C07FFFF34E47FFF00A4382485
63752+:100070000A00132B34C880000A001334010910242D
63753+:1000800000EC58245160005AAF8000248F8D002C62
63754+:100090003C0E0F0000EE182415A00075AF83001071
63755+:1000A00030EF010011E00073939800103C12020041
63756+:1000B000107200703C06800034D9010093280013B0
63757+:1000C0009789002A36A60002311800FF271600047F
63758+:1000D000001619C03C0480008C8501B804A0FFFE06
63759+:1000E00034880180AD0300003C158008AC830020FB
63760+:1000F00096BF004833E5FFFF10A001BCAF850008A4
63761+:100100002523001200A3102B504001B98F85000455
63762+:10011000348D010095AC0020240B001A30E440001F
63763+:10012000318AFFFFA10B000B108001BA2543FFFEAF
63764+:1001300000A3702B15C001B88F9600048F8F0004A8
63765+:10014000A503001435E50001AF8500043C088000DC
63766+:1001500035150180A6A9000EA6A9001A8F89000CEA
63767+:1001600030BF8000A6A70010AEA90028A6A60008F0
63768+:1001700013E0000F3C0F8000350C0100958B00163A
63769+:10018000316AFFFC25440004008818218C6240007D
63770+:100190003046FFFF14C000072416BFFF3C0EFFFFD0
63771+:1001A00035CD7FFF00AD2824AF8500043C0F8000D3
63772+:1001B0002416BFFF00B6902435E50180A4B20026C6
63773+:1001C000ACA7002C3C071000ADE701B80A00137083
63774+:1001D000000018210E00165D000000003C0A4000DF
63775+:1001E0003C098000AD2A01780A00130400000000D9
63776+:1001F0008F85002410A00027AF80001090A300007E
63777+:10020000106000742409000310690101000030210E
63778+:1002100090AE0001240D000211CD014230EF0040EC
63779+:1002200090A90001241F0001113F000930E20040A5
63780+:100230008CA600049785002A00C020210E000FDB49
63781+:1002400036A60002000040210A00137001001821A8
63782+:100250005040FFF88CA600043C07080190E7964147
63783+:1002600010E0FFF4240800010A00137001001821B7
63784+:10027000939800103C1F080127FF96400018C8C043
63785+:10028000033F4021AF8800248F85002414A0FFDBAA
63786+:10029000AF9800103C0480008C86400030C50100FF
63787+:1002A00010A0008732AB00043C0C08008D8C0024A9
63788+:1002B00024160004156000033192000D241600027C
63789+:1002C0003C0480008C8E4000340DFFFF11CD0113E3
63790+:1002D00032B5FFFB8C984000330F010055E0000160
63791+:1002E0002415001030E80800110000382409FFFB35
63792+:1002F0008F9F00282FF903EF53200001024990241B
63793+:1003000030E2010010400014325F00018F87002CA2
63794+:1003100014E0010E8F8C000C3C0480003486010038
63795+:1003200090C5001330AA00FF25430004000321C03C
63796+:100330002419FFFE025990241240000202B6302513
63797+:1003400032A6FFFF0E000FDB9785002A1240FEA3A6
63798+:1003500000001821325F000113E0000D3247000455
63799+:10036000240900011249000202B6302532A6FFFF1F
63800+:100370009785002A0E000FDB000020212402FFFEDB
63801+:10038000024290241240FE950000182132470004DA
63802+:1003900050E0FE922403000102B63025241600042A
63803+:1003A0005656000132A6FFFF9785002A0E000FDB8C
63804+:1003B000240401002403FFFB0243A82412A0FE87AB
63805+:1003C000000018210A001370240300010A0014B968
63806+:1003D0000249902410A0FFAF30E5010010A00017E3
63807+:1003E0008F8600102403000210C300148F84000CB9
63808+:1003F0003C0608008CC6003824CAFFFF14C0000267
63809+:10040000008A1024000010213C0E080025CE003880
63810+:10041000004E682191AC00048F850028258B0004D4
63811+:10042000000B21C030A5FFFF36A600020E000FDB37
63812+:10043000000000000A00137000001821240F0002C1
63813+:1004400010CF0088241600013C0308008C6300D004
63814+:100450001076008D8F85002824D9FFFC2F280004FA
63815+:100460001500006300002021241F0002107F005DA2
63816+:100470002CC9000C24C3FFF82C6200041440FFE9CF
63817+:100480000000202130EA020051400004000621C093
63818+:1004900054C0000530A5FFFF000621C030A5FFFFB6
63819+:1004A0000A00150436A600020E000FDB32A600017A
63820+:1004B0008F8600108F8500280A001520000621C0B5
63821+:1004C0003C0A08008D4A0024315200015240FE438C
63822+:1004D000000018219785002A36A600020E000FDBC7
63823+:1004E000000020210A001370000018219668000CFB
63824+:1004F000311802005700FE313C0500013C1F800806
63825+:1005000097F900489789002A36A600023328FFFF92
63826+:10051000AF8800083C0380008C7501B806A0FFFE80
63827+:100520003C04800034820180AC400000110000B621
63828+:1005300024180003252A0012010A182B106000B2AB
63829+:1005400000000000966F00203C0E8000240D001A71
63830+:1005500031ECFFFF35CA018030EB4000A14D000BAC
63831+:10056000116000B02583FFFE0103902B164000AE02
63832+:100570002416FFFE34A50001A5430014AF85000436
63833+:100580002419BFFF00B94024A6E9000EA6E9001A0D
63834+:10059000A6E60008A6E80026A6E700103C07100023
63835+:1005A000AE8701B80A001370000018213C048000D7
63836+:1005B0008C8201B80440FFFE349601802415001C93
63837+:1005C000AEC70000A2D5000B3C071000AC8701B8F5
63838+:1005D0003C0A40003C098000AD2A01780A0013045F
63839+:1005E000000000005120FFA424C3FFF800002021D8
63840+:1005F00030A5FFFF0A00150436A600020E00103DCC
63841+:10060000000000008F8700000A001346AF82000C34
63842+:1006100090A30001241500011075FF0B24080001B0
63843+:10062000240600021066000430E2004024080001A5
63844+:100630000A001370010018215040FFFD240800013A
63845+:100640003C0C8000358B0100956A001094A40002D8
63846+:100650003143FFFF5083FDE1010018210A00158599
63847+:10066000240800018F8500282CB203EF1240FDDB27
63848+:10067000240300013C0308008C6300D02416000111
63849+:100680001476FF7624D9FFFC2CD8000C1300FF72DF
63850+:10069000000621C030A5FFFF0A00150436A600029F
63851+:1006A00010B00037240F000B14AFFE23000731C039
63852+:1006B000312600FF00065600000A4E0305220047BF
63853+:1006C00030C6007F0006F8C03C16080126D69640CA
63854+:1006D00003F68021A2000001A20000003C0F600090
63855+:1006E0008DF918202405000100C588040011302769
63856+:1006F0000326C024000731C000C03821ADF81820FF
63857+:100700000A0013C8A60000028F850020000731C030
63858+:1007100024A2FFFF0A0013F6AF8200200A0014B2E1
63859+:100720002415002011E0FECC3C1980003728010080
63860+:100730009518001094B6000233120FFF16D2FEC6B1
63861+:10074000000000000A00148290A900013C0B080080
63862+:100750008D6B0038256DFFFF15600002018D1024A0
63863+:10076000000010213C080800250800380048C0217E
63864+:10077000930F000425EE00040A0014C5000E21C0EA
63865+:1007800000065202241F00FF115FFDEB000731C07D
63866+:10079000000A20C03C0E080125CE9640008EA821FC
63867+:1007A000009E602100095C02240D00013C076000EE
63868+:1007B000A2AD0000AD860000A2AB00018CF21820B3
63869+:1007C00024030001014310040242B025ACF61820B6
63870+:1007D00000C038210A0013C8A6A900020A0015AA01
63871+:1007E000AF8000200A0012FFAF84002C8F85000428
63872+:1007F0003C1980002408000337380180A308000B4F
63873+:100800000A00144D3C088000A2F8000B0A00155A9B
63874+:100810002419BFFF8F9600042412FFFE0A00144B18
63875+:1008200002D228242416FFFE0A00155800B62824F8
63876+:100830003C038000346401008C85000030A2003E3F
63877+:100840001440000800000000AC6000488C870000E5
63878+:1008500030E607C010C0000500000000AC60004C8E
63879+:10086000AC60005003E0000824020001AC600054BA
63880+:10087000AC6000408C880000310438001080FFF923
63881+:10088000000000002402000103E00008AC60004406
63882+:100890003C0380008C6201B80440FFFE3467018095
63883+:1008A000ACE4000024080001ACE00004A4E500086A
63884+:1008B00024050002A0E8000A34640140A0E5000B12
63885+:1008C0009483000A14C00008A4E30010ACE00024E4
63886+:1008D0003C07800034E901803C041000AD20002872
63887+:1008E00003E00008ACE401B88C8600043C0410006E
63888+:1008F000ACE600243C07800034E90180AD200028EC
63889+:1009000003E00008ACE401B83C0680008CC201B8EA
63890+:100910000440FFFE34C7018024090002ACE400005B
63891+:10092000ACE40004A4E50008A0E9000A34C50140D5
63892+:10093000A0E9000B94A8000A3C041000A4E80010F1
63893+:10094000ACE000248CA30004ACE3002803E0000822
63894+:10095000ACC401B83C039000346200010082202541
63895+:100960003C038000AC6400208C65002004A0FFFEE6
63896+:100970000000000003E00008000000003C028000CE
63897+:10098000344300010083202503E00008AC4400202C
63898+:1009900027BDFFE03C098000AFBF0018AFB10014D5
63899+:1009A000AFB00010352801408D10000091040009FF
63900+:1009B0009107000891050008308400FF30E600FF31
63901+:1009C00000061A002C820081008330251040002A86
63902+:1009D00030A50080000460803C0D080125AD92B078
63903+:1009E000018D58218D6A00000140000800000000C0
63904+:1009F0003C038000346201409445000A14A0001EAC
63905+:100A00008F91FCC09227000530E6000414C0001A44
63906+:100A1000000000000E00164E02002021922A000560
63907+:100A200002002021354900040E001658A2290005B5
63908+:100A30009228000531040004148000020000000028
63909+:100A40000000000D922D0000240B002031AC00FFAF
63910+:100A5000158B00093C0580008CAE01B805C0FFFE77
63911+:100A600034B10180AE3000003C0F100024100005AE
63912+:100A7000A230000BACAF01B80000000D8FBF001812
63913+:100A80008FB100148FB0001003E0000827BD0020D4
63914+:100A90000200202100C028218FBF00188FB1001450
63915+:100AA0008FB00010240600010A00161D27BD00208B
63916+:100AB0000000000D0200202100C028218FBF001877
63917+:100AC0008FB100148FB00010000030210A00161DF5
63918+:100AD00027BD002014A0FFE8000000000200202134
63919+:100AE0008FBF00188FB100148FB0001000C02821F4
63920+:100AF0000A00163B27BD00203C0780008CEE01B8A1
63921+:100B000005C0FFFE34F00180241F0002A21F000B6D
63922+:100B100034F80140A60600089719000A3C0F10009F
63923+:100B2000A61900108F110004A6110012ACEF01B835
63924+:100B30000A0016998FBF001827BDFFE8AFBF00104D
63925+:100B40000E000FD4000000003C0280008FBF001098
63926+:100B500000002021AC4001800A00108F27BD001842
63927+:100B60003084FFFF30A5FFFF108000070000182130
63928+:100B7000308200011040000200042042006518216C
63929+:100B80001480FFFB0005284003E0000800601021EE
63930+:100B900010C00007000000008CA2000024C6FFFF68
63931+:100BA00024A50004AC82000014C0FFFB24840004D0
63932+:100BB00003E000080000000010A0000824A3FFFFCD
63933+:100BC000AC86000000000000000000002402FFFFCF
63934+:100BD0002463FFFF1462FFFA2484000403E000088A
63935+:100BE000000000003C03800027BDFFF83462018054
63936+:100BF000AFA20000308C00FF30AD00FF30CE00FF10
63937+:100C00003C0B80008D6401B80480FFFE00000000F2
63938+:100C10008FA900008D6801288FAA00008FA700000F
63939+:100C20008FA400002405000124020002A085000A10
63940+:100C30008FA30000359940003C051000A062000B16
63941+:100C40008FB800008FAC00008FA600008FAF0000AF
63942+:100C500027BD0008AD280000AD400004AD80002491
63943+:100C6000ACC00028A4F90008A70D0010A5EE0012E2
63944+:100C700003E00008AD6501B83C06800827BDFFE829
63945+:100C800034C50080AFBF001090A7000924020012F5
63946+:100C900030E300FF1062000B008030218CA8005070
63947+:100CA00000882023048000088FBF00108CAA003425
63948+:100CB000240400390000282100CA4823052000052B
63949+:100CC000240600128FBF00102402000103E0000878
63950+:100CD00027BD00180E0016F2000000008FBF0010A4
63951+:100CE0002402000103E0000827BD001827BDFFC84B
63952+:100CF000AFB20030AFB00028AFBF0034AFB1002CAE
63953+:100D000000A0802190A5000D30A6001010C000109A
63954+:100D1000008090213C0280088C4400048E0300086F
63955+:100D20001064000C30A7000530A6000510C0009329
63956+:100D3000240400018FBF00348FB200308FB1002C2B
63957+:100D40008FB000280080102103E0000827BD003884
63958+:100D500030A7000510E0000F30AB001210C00006F5
63959+:100D6000240400013C0980088E0800088D25000439
63960+:100D70005105009C240400388FBF00348FB200302E
63961+:100D80008FB1002C8FB000280080102103E00008F4
63962+:100D900027BD0038240A0012156AFFE6240400016A
63963+:100DA0000200202127A500100E000CB6AFA00010F5
63964+:100DB0001440007C3C19800837240080909800087B
63965+:100DC000331100081220000A8FA7001030FF010025
63966+:100DD00013E000A48FA300148C8600580066102333
63967+:100DE000044000043C0A8008AC8300588FA7001020
63968+:100DF0003C0A800835480080910900083124000829
63969+:100E00001480000224080003000040213C1F8008D9
63970+:100E100093F1001193F9001237E600808CCC005456
63971+:100E2000333800FF03087821322D00FF000F708057
63972+:100E300001AE282100AC582B1160006F00000000AB
63973+:100E400094CA005C8CC900543144FFFF0125102373
63974+:100E50000082182B14600068000000008CCB005446
63975+:100E60000165182330EC00041180006C000830800C
63976+:100E70008FA8001C0068102B1040006230ED0004A9
63977+:100E8000006610232C46008010C00002004088211C
63978+:100E9000241100800E00164E024020213C0D8008D7
63979+:100EA00035A6008024070001ACC7000C90C80008DC
63980+:100EB0000011484035A70100310C007FA0CC00088C
63981+:100EC0008E05000424AB0001ACCB0030A4D1005C43
63982+:100ED0008CCA003C9602000E01422021ACC40020C6
63983+:100EE0008CC3003C0069F821ACDF001C8E190004A3
63984+:100EF000ACF900008E180008ACF800048FB10010A7
63985+:100F0000322F000855E0004793A60020A0C0004EF5
63986+:100F100090D8004E2411FFDFA0F8000890CF000801
63987+:100F200001F17024A0CE00088E0500083C0B80085B
63988+:100F300035690080AD2500388D6A00148D2200309F
63989+:100F40002419005001422021AD24003491230000D7
63990+:100F5000307F00FF13F90036264F01000E001658AF
63991+:100F60000240202124040038000028210E0016F23F
63992+:100F70002406000A0A001757240400010E000D2859
63993+:100F8000000020218FBF00348FB200308FB1002CC1
63994+:100F90008FB00028004020210080102103E00008CD
63995+:100FA00027BD00388E0E00083C0F800835F0008009
63996+:100FB000AE0E005402402021AE0000300E00164E4E
63997+:100FC00000000000920D00250240202135AC0020D9
63998+:100FD0000E001658A20C00250E000CAC0240202179
63999+:100FE000240400382405008D0E0016F22406001299
64000+:100FF0000A0017572404000194C5005C0A001792E8
64001+:1010000030A3FFFF2407021811A0FF9E00E6102363
64002+:101010008FAE001C0A00179A01C610230A0017970A
64003+:101020002C620218A0E600080A0017C48E0500080A
64004+:101030002406FF8001E6C0243C118000AE38002861
64005+:101040008E0D000831E7007F3C0E800C00EE602121
64006+:10105000AD8D00E08E080008AF8C00380A0017D074
64007+:10106000AD8800E4AC800058908500082403FFF7A9
64008+:1010700000A33824A08700080A0017758FA7001066
64009+:101080003C05080024A560A83C04080024846FF4F3
64010+:101090003C020800244260B0240300063C01080121
64011+:1010A000AC2596C03C010801AC2496C43C01080163
64012+:1010B000AC2296C83C010801A02396CC03E00008AE
64013+:1010C0000000000003E00008240200013C02800050
64014+:1010D000308800FF344701803C0680008CC301B893
64015+:1010E0000460FFFE000000008CC501282418FF806A
64016+:1010F0003C0D800A24AF010001F8702431EC007F20
64017+:10110000ACCE0024018D2021ACE50000948B00EAD8
64018+:101110003509600024080002316AFFFFACEA0004D0
64019+:1011200024020001A4E90008A0E8000BACE00024C0
64020+:101130003C071000ACC701B8AF84003803E00008DA
64021+:10114000AF85006C938800488F8900608F820038DB
64022+:1011500030C600FF0109382330E900FF01221821C1
64023+:1011600030A500FF2468008810C000020124382147
64024+:101170000080382130E400031480000330AA00030B
64025+:101180001140000D312B000310A0000900001021B8
64026+:1011900090ED0000244E000131C200FF0045602B9D
64027+:1011A000A10D000024E700011580FFF925080001CA
64028+:1011B00003E00008000000001560FFF300000000DD
64029+:1011C00010A0FFFB000010218CF80000245900043F
64030+:1011D000332200FF0045782BAD18000024E70004FF
64031+:1011E00015E0FFF92508000403E0000800000000F6
64032+:1011F00093850048938800588F8700600004320070
64033+:101200003103007F00E5102B30C47F001040000F39
64034+:10121000006428258F8400383C0980008C8A00EC0B
64035+:10122000AD2A00A43C03800000A35825AC6B00A0AD
64036+:101230008C6C00A00580FFFE000000008C6D00ACEF
64037+:10124000AC8D00EC03E000088C6200A80A00188254
64038+:101250008F840038938800593C0280000080502120
64039+:10126000310300FEA383005930ABFFFF30CC00FFF9
64040+:1012700030E7FFFF344801803C0980008D2401B82D
64041+:101280000480FFFE8F8D006C24180016AD0D000049
64042+:101290008D2201248F8D0038AD0200048D5900206D
64043+:1012A000A5070008240201C4A119000AA118000B17
64044+:1012B000952F01208D4E00088D4700049783005C18
64045+:1012C0008D59002401CF302100C7282100A32023FD
64046+:1012D0002418FFFFA504000CA50B000EA5020010AA
64047+:1012E000A50C0012AD190018AD18002495AF00E848
64048+:1012F0003C0B10002407FFF731EEFFFFAD0E002876
64049+:101300008DAC0084AD0C002CAD2B01B88D460020B7
64050+:1013100000C7282403E00008AD4500208F8800386E
64051+:101320000080582130E7FFFF910900D63C02800081
64052+:1013300030A5FFFF312400FF00041A00006750258C
64053+:1013400030C600FF344701803C0980008D2C01B875
64054+:101350000580FFFE8F82006C240F0017ACE20000B6
64055+:101360008D390124ACF900048D780020A4EA00082E
64056+:10137000241901C4A0F8000AA0EF000B9523012056
64057+:101380008D6E00088D6D00049784005C01C35021B0
64058+:10139000014D602101841023A4E2000CA4E5000E9D
64059+:1013A000A4F90010A4E60012ACE000148D7800242B
64060+:1013B000240DFFFFACF800188D0F007CACEF001C73
64061+:1013C0008D0E00783C0F1000ACEE0020ACED002438
64062+:1013D000950A00BE240DFFF73146FFFFACE600285A
64063+:1013E000950C00809504008231837FFF0003CA00C2
64064+:1013F0003082FFFF0322C021ACF8002CAD2F01B8D2
64065+:10140000950E00828D6A002000AE3021014D282407
64066+:10141000A506008203E00008AD6500203C028000C4
64067+:10142000344501803C0480008C8301B80460FFFED9
64068+:101430008F8A0044240600199549001C3128FFFFBB
64069+:10144000000839C0ACA70000A0A6000B3C051000A6
64070+:1014500003E00008AC8501B88F87004C0080402174
64071+:1014600030C400FF3C0680008CC201B80440FFFE7F
64072+:101470008F89006C9383006834996000ACA90000E8
64073+:10148000A0A300058CE20010240F00022403FFF744
64074+:10149000A4A20006A4B900088D180020A0B8000A74
64075+:1014A000A0AF000B8CEE0000ACAE00108CED000481
64076+:1014B000ACAD00148CEC001CACAC00248CEB002018
64077+:1014C000ACAB00288CEA002C3C071000ACAA002C26
64078+:1014D0008D090024ACA90018ACC701B88D05002007
64079+:1014E00000A3202403E00008AD0400208F8600380C
64080+:1014F00027BDFFE0AFB10014AFBF0018AFB00010C0
64081+:1015000090C300D430A500FF3062002010400008D6
64082+:10151000008088218CCB00D02409FFDF256A0001E0
64083+:10152000ACCA00D090C800D401093824A0C700D4A8
64084+:1015300014A000403C0C80008F840038908700D4B9
64085+:101540002418FFBF2406FFEF30E3007FA08300D400
64086+:10155000979F005C8F8200608F8D003803E2C82364
64087+:10156000A799005CA5A000BC91AF00D401F870243D
64088+:10157000A1AE00D48F8C0038A18000D78F8A0038AC
64089+:10158000A5400082AD4000EC914500D400A658244F
64090+:10159000A14B00D48F9000348F8400609786005C4C
64091+:1015A0000204282110C0000FAF850034A38000582A
64092+:1015B0003C0780008E2C000894ED01208E2B000447
64093+:1015C000018D5021014B8021020620233086FFFF30
64094+:1015D00030C8000F3909000131310001162000091F
64095+:1015E000A3880058938600488FBF00188FB100145D
64096+:1015F0008FB0001027BD0020AF85006403E0000815
64097+:10160000AF86006000C870238FBF00189386004823
64098+:101610008FB100148FB0001034EF0C00010F28219F
64099+:1016200027BD0020ACEE0084AF85006403E0000815
64100+:10163000AF86006035900180020028210E00190F4E
64101+:10164000240600828F840038908600D430C5004084
64102+:1016500050A0FFBAA38000688F85004C3C06800034
64103+:101660008CCD01B805A0FFFE8F89006C2408608234
64104+:1016700024070002AE090000A6080008A207000B1C
64105+:101680008CA300083C0E1000AE0300108CA2000CCE
64106+:10169000AE0200148CBF0014AE1F00188CB90018E5
64107+:1016A000AE1900248CB80024AE1800288CAF002896
64108+:1016B000AE0F002CACCE01B80A001948A380006818
64109+:1016C0008F8A003827BDFFE0AFB10014AFB0001023
64110+:1016D0008F880060AFBF00189389003C954200BC22
64111+:1016E00030D100FF0109182B0080802130AC00FFB1
64112+:1016F0003047FFFF0000582114600003310600FF4F
64113+:1017000001203021010958239783005C0068202BB9
64114+:101710001480002700000000106800562419000102
64115+:101720001199006334E708803165FFFF0E0018C08F
64116+:10173000020020218F83006C3C07800034E601808A
64117+:101740003C0580008CAB01B80560FFFE240A001840
64118+:101750008F840038ACC30000A0CA000B948900BE7F
64119+:101760003C081000A4C90010ACC00030ACA801B8FF
64120+:101770009482008024430001A4830080949F008011
64121+:101780003C0608008CC6318833EC7FFF1186005E72
64122+:101790000000000002002021022028218FBF001835
64123+:1017A0008FB100148FB000100A00193427BD00203B
64124+:1017B000914400D42403FF8000838825A15100D4E4
64125+:1017C0009784005C3088FFFF51000023938C003C1D
64126+:1017D0008F8500382402EFFF008B782394AE00BC85
64127+:1017E0000168502B31E900FF01C26824A4AD00BCA0
64128+:1017F00051400039010058213C1F800037E60100AC
64129+:101800008CD800043C190001031940245500000144
64130+:1018100034E740008E0A00202403FFFB241100015E
64131+:1018200001432024AE0400201191002D34E78000F4
64132+:1018300002002021012030210E0018C03165FFFF79
64133+:101840009787005C8F890060A780005C0127802358
64134+:10185000AF900060938C003C8F8B00388FBF0018D6
64135+:101860008FB100148FB0001027BD002003E00008E6
64136+:10187000A16C00D73C0D800035AA01008D48000402
64137+:101880003C0900010109282454A0000134E740006C
64138+:101890008E0F00202418FFFB34E7800001F870242D
64139+:1018A00024190001AE0E00201599FF9F34E708802F
64140+:1018B000020020210E00188E3165FFFF020020215A
64141+:1018C000022028218FBF00188FB100148FB00010A4
64142+:1018D0000A00193427BD00200A0019F7000048212A
64143+:1018E00002002021012030210E00188E3165FFFFFB
64144+:1018F0009787005C8F890060A780005C01278023A8
64145+:101900000A001A0EAF900060948C0080241F8000A3
64146+:10191000019F3024A4860080908B0080908F0080EF
64147+:10192000316700FF0007C9C20019C027001871C045
64148+:1019300031ED007F01AE2825A08500800A0019DF67
64149+:1019400002002021938500682403000127BDFFE8E1
64150+:1019500000A330042CA20020AFB00010AFBF0014D1
64151+:1019600000C01821104000132410FFFE3C0708009F
64152+:101970008CE7319000E610243C088000350501809A
64153+:1019800014400005240600848F890038240A0004CE
64154+:101990002410FFFFA12A00FC0E00190F0000000018
64155+:1019A000020010218FBF00148FB0001003E0000868
64156+:1019B00027BD00183C0608008CC631940A001A574F
64157+:1019C00000C310248F87004427BDFFE0AFB200188A
64158+:1019D000AFB10014AFB00010AFBF001C30D000FF9B
64159+:1019E00090E6000D00A088210080902130C5007F86
64160+:1019F000A0E5000D8F8500388E2300188CA200D042
64161+:101A00001062002E240A000E0E001A4AA38A0068F3
64162+:101A10002409FFFF104900222404FFFF5200002088
64163+:101A2000000020218E2600003C0C001000CC582421
64164+:101A3000156000393C0E000800CE682455A0003F18
64165+:101A4000024020213C18000200D880241200001F10
64166+:101A50003C0A00048F8700448CE200148CE30010E1
64167+:101A60008CE500140043F82303E5C82B1320000580
64168+:101A7000024020218E24002C8CF1001010910031A6
64169+:101A80000240202124020012A38200680E001A4A9C
64170+:101A90002412FFFF105200022404FFFF0000202147
64171+:101AA0008FBF001C8FB200188FB100148FB00010D0
64172+:101AB0000080102103E0000827BD002090A800D47A
64173+:101AC000350400200A001A80A0A400D400CA4824CB
64174+:101AD0001520000B8F8B00448F8D00448DAC0010BF
64175+:101AE0001580000B024020218E2E002C51C0FFECEF
64176+:101AF00000002021024020210A001A9B2402001726
64177+:101B00008D66001050C0FFE6000020210240202119
64178+:101B10000A001A9B24020011024020212402001511
64179+:101B20000E001A4AA3820068240FFFFF104FFFDC4B
64180+:101B30002404FFFF0A001A8A8E2600000A001AC138
64181+:101B4000240200143C08000400C8382450E0FFD4EC
64182+:101B500000002021024020210A001A9B24020013C9
64183+:101B60008F85003827BDFFD8AFB3001CAFB2001877
64184+:101B7000AFB10014AFB00010AFBF002090A700D4E9
64185+:101B80008F90004C2412FFFF34E2004092060000C8
64186+:101B9000A0A200D48E0300100080982110720006CD
64187+:101BA00030D1003F2408000D0E001A4AA3880068B7
64188+:101BB000105200252404FFFF8F8A00388E09001878
64189+:101BC0008D4400D01124000702602021240C000E57
64190+:101BD0000E001A4AA38C0068240BFFFF104B001A5A
64191+:101BE0002404FFFF24040020122400048F8D0038F9
64192+:101BF00091AF00D435EE0020A1AE00D48F85005403
64193+:101C000010A00019000000001224004A8F9800382C
64194+:101C10008F92FCC0971000809651000A5230004805
64195+:101C20008F9300403C1F08008FFF318C03E5C82BC9
64196+:101C30001720001E02602021000028210E0019A993
64197+:101C400024060001000020218FBF00208FB3001C5C
64198+:101C50008FB200188FB100148FB0001000801021D7
64199+:101C600003E0000827BD00285224002A8E05001436
64200+:101C70008F840038948A008025490001A48900805F
64201+:101C8000948800803C0208008C42318831077FFF35
64202+:101C900010E2000E00000000026020210E00193446
64203+:101CA000240500010A001B0B000020212402002D46
64204+:101CB0000E001A4AA38200682403FFFF1443FFE1C9
64205+:101CC0002404FFFF0A001B0C8FBF002094990080A2
64206+:101CD000241F800024050001033FC024A498008035
64207+:101CE00090920080908E0080325100FF001181C2DE
64208+:101CF00000107827000F69C031CC007F018D582576
64209+:101D0000A08B00800E001934026020210A001B0BFA
64210+:101D1000000020212406FFFF54A6FFD68F84003840
64211+:101D2000026020210E001934240500010A001B0B5B
64212+:101D300000002021026020210A001B252402000A45
64213+:101D40002404FFFD0A001B0BAF9300608F8800384E
64214+:101D500027BDFFE8AFB00010AFBF0014910A00D458
64215+:101D60008F87004C00808021354900408CE60010B0
64216+:101D7000A10900D43C0208008C4231B030C53FFFBD
64217+:101D800000A2182B106000078F850050240DFF80E3
64218+:101D900090AE000D01AE6024318B00FF156000088D
64219+:101DA0000006C382020020212403000D8FBF00140F
64220+:101DB0008FB0001027BD00180A001A4AA3830068DC
64221+:101DC00033060003240F000254CFFFF70200202146
64222+:101DD00094A2001C8F85003824190023A4A200E8D7
64223+:101DE0008CE8000000081E02307F003F13F9003528
64224+:101DF0003C0A00838CE800188CA600D0110600086D
64225+:101E0000000000002405000E0E001A4AA385006899
64226+:101E10002407FFFF104700182404FFFF8F850038B8
64227+:101E200090A900D435240020A0A400D48F8C0044B5
64228+:101E3000918E000D31CD007FA18D000D8F83005458
64229+:101E40001060001C020020218F8400508C9800102C
64230+:101E50000303782B11E0000D241900180200202143
64231+:101E6000A39900680E001A4A2410FFFF10500002C8
64232+:101E70002404FFFF000020218FBF00148FB000104A
64233+:101E80000080102103E0000827BD00188C86001098
64234+:101E90008F9F00440200202100C31023AFE20010F6
64235+:101EA000240500010E0019A9240600010A001B9751
64236+:101EB000000020210E001934240500010A001B97A0
64237+:101EC00000002021010A5824156AFFD98F8C004494
64238+:101ED000A0A600FC0A001B84A386005A30A500FFC0
64239+:101EE0002406000124A9000100C9102B1040000C99
64240+:101EF00000004021240A000100A61823308B0001B5
64241+:101F000024C60001006A3804000420421160000267
64242+:101F100000C9182B010740251460FFF800A61823FC
64243+:101F200003E000080100102127BDFFD8AFB0001862
64244+:101F30008F90004CAFB1001CAFBF00202403FFFF07
64245+:101F40002411002FAFA30010920600002405000802
64246+:101F500026100001006620260E001BB0308400FF12
64247+:101F600000021E003C021EDC34466F410A001BD8F2
64248+:101F70000000102110A00009008018212445000154
64249+:101F800030A2FFFF2C4500080461FFFA0003204047
64250+:101F90000086202614A0FFF9008018210E001BB037
64251+:101FA000240500208FA300102629FFFF313100FFF8
64252+:101FB00000034202240700FF1627FFE20102182651
64253+:101FC00000035027AFAA0014AFAA00100000302170
64254+:101FD00027A8001027A7001400E6782391ED00033E
64255+:101FE00024CE000100C8602131C600FF2CCB0004C4
64256+:101FF0001560FFF9A18D00008FA200108FBF002097
64257+:102000008FB1001C8FB0001803E0000827BD002826
64258+:1020100027BDFFD0AFB3001CAFB00010AFBF00288A
64259+:10202000AFB50024AFB40020AFB20018AFB10014B8
64260+:102030003C0C80008D880128240FFF803C06800A1C
64261+:1020400025100100250B0080020F68243205007F57
64262+:10205000016F7024AD8E009000A62821AD8D002464
64263+:1020600090A600FC3169007F3C0A8004012A1821F7
64264+:10207000A386005A9067007C00809821AF830030CF
64265+:1020800030E20002AF88006CAF85003800A0182154
64266+:10209000144000022404003424040030A3840048C7
64267+:1020A0008C7200DC30D100FF24040004AF92006089
64268+:1020B00012240004A38000688E7400041680001EA1
64269+:1020C0003C0880009386005930C7000110E0000FE3
64270+:1020D0008F9300608CB000848CA800842404FF805F
64271+:1020E000020410240002F940310A007F03EA482567
64272+:1020F0003C0C2000012C902530CD00FE3C038000DC
64273+:10210000AC720830A38D00598F9300608FBF0028F8
64274+:102110008FB50024ACB300DC8FB400208FB3001C5B
64275+:102120008FB200188FB100148FB00010240200018C
64276+:1021300003E0000827BD00308E7F000895020120D3
64277+:102140008E67001003E2C8213326FFFF30D8000F4E
64278+:1021500033150001AF87003416A00058A39800582B
64279+:1021600035090C000309382100D81823AD03008479
64280+:10217000AF8700648E6A00043148FFFF1100007EC3
64281+:10218000A78A005C90AC00D42407FF8000EC3024C8
64282+:1021900030CB00FF1560004B9786005C938E005A91
64283+:1021A000240D000230D5FFFF11CD02A20000A021B6
64284+:1021B0008F85006002A5802B160000BC9388004824
64285+:1021C0003C11800096240120310400FF1485008812
64286+:1021D0008F8400648F9800343312000356400085CA
64287+:1021E00030A500FF8F900064310C00FF24060034FE
64288+:1021F00011860095AF90004C9204000414800118E1
64289+:102200008F8E0038A380003C8E0D00048DC800D84E
64290+:102210003C0600FF34CCFFFF01AC30240106182B34
64291+:1022200014600120AF8600548F8700609798005C8F
64292+:10223000AF8700400307402310C000C7A788005C99
64293+:102240008F91003030C3000300035823922A007C92
64294+:102250003171000302261021000A20823092000111
64295+:102260000012488000492821311FFFFF03E5C82BD9
64296+:10227000132001208F8800388F8500348F880064F8
64297+:102280001105025A3C0E3F018E0600003C0C250051
64298+:1022900000CE682411AC01638F84004C30E500FF50
64299+:1022A0000E00184A000030218F8800388F870060A8
64300+:1022B0008F8500340A001DB78F8600540A001C5613
64301+:1022C000AF87006490A400D400E48024320200FFB1
64302+:1022D000104000169386005990A6008890AE00D753
64303+:1022E00024A8008830D4003F2686FFE02CD10020AF
64304+:1022F000A38E003C1220000CAF88004C240B000180
64305+:1023000000CB20043095001916A0012B3C0680005C
64306+:1023100034CF0002008FC0241700022E3099002015
64307+:1023200017200234000000009386005930CB0001D2
64308+:102330001160000F9788005C8CBF00848CA900841A
64309+:10234000240AFF8003EA6024000C19403132007F28
64310+:10235000007238253C0D200000EDC82530D800FE65
64311+:102360003C0F8000ADF90830A39800599788005CB5
64312+:102370001500FF84000000008E630020306200041E
64313+:102380001040FF51938600592404FFFB0064802411
64314+:102390003C038000AE700020346601808C7301B86D
64315+:1023A0000660FFFE8F98006C347501003C1400013C
64316+:1023B000ACD800008C6B012424076085ACCB0004F2
64317+:1023C0008EAE000401D488245220000124076083CB
64318+:1023D00024190002A4C700083C0F1000A0D9000B6C
64319+:1023E0003C068000ACCF01B80A001C2B9386005934
64320+:1023F00030A500FF0E00184A240600018F88006CEB
64321+:102400003C05800034A90900250201889388004812
64322+:10241000304A0007304B00783C0340802407FF809F
64323+:102420000163C825014980210047F824310C00FFD1
64324+:1024300024060034ACBF0800AF90004CACB90810C3
64325+:102440005586FF6E920400048F8400388E11003090
64326+:10245000908E00D431CD001015A000108F83006045
64327+:102460002C6F000515E000E400000000909800D4F7
64328+:102470002465FFFC331200101640000830A400FF52
64329+:102480008F9F00648F99003413F90004388700018E
64330+:1024900030E20001144001C8000000000E001BC320
64331+:1024A000000000000A001DF8000000008F84006496
64332+:1024B00030C500FF0E00184A24060001939800481A
64333+:1024C000240B0034130B00A08F8500388F8600602A
64334+:1024D0009783005C306EFFFF00CE8823AF910060D1
64335+:1024E000A780005C1280FF90028018212414FFFD59
64336+:1024F0005474FFA28E6300208E6A00042403FFBF81
64337+:102500002408FFEF0155F823AE7F000490AC00D4FF
64338+:102510003189007FA0A900D48E7200208F8F0038EF
64339+:10252000A780005C364D0002AE6D0020A5E000BC27
64340+:1025300091E500D400A3C824A1F900D48F950038F8
64341+:10254000AEA000EC92B800D403085824A2AB00D48B
64342+:102550000A001CD78F8500388F910034AF8000604F
64343+:1025600002275821AF8B0034000020212403FFFFF5
64344+:10257000108301B48F8500388E0C00103C0D0800CC
64345+:102580008DAD31B09208000031843FFF008D802B6B
64346+:1025900012000023310D003F3C1908008F3931A88B
64347+:1025A0008F9F006C000479802408FF80033F202166
64348+:1025B000008FC821938500590328F8243C06008029
64349+:1025C0003C0F800034D80001001F91403331007F60
64350+:1025D0008F8600380251502535EE0940332B0078A4
64351+:1025E000333000073C0310003C02800C017890253A
64352+:1025F000020E48210143C0250222382134AE0001D9
64353+:10260000ADFF0804AF890050ADF20814AF87004455
64354+:10261000ADFF0028ACD90084ADF80830A38E005976
64355+:102620009383005A24070003106700272407000142
64356+:102630001467FFAC8F8500382411002311B1008589
64357+:1026400000000000240E000B026020210E001A4A38
64358+:10265000A38E00680040A0210A001D328F8500383B
64359+:1026600002602021240B000C0E001A4AA38B006884
64360+:10267000240AFFFF104AFFBD2404FFFF8F8E00389D
64361+:10268000A380003C8E0D00048DC800D83C0600FFDE
64362+:1026900034CCFFFF01AC30240106182B1060FEE2A1
64363+:1026A000AF86005402602021241200190E001A4A3D
64364+:1026B000A3920068240FFFFF104FFFAC2404FFFF1C
64365+:1026C0000A001C838F86005425A3FFE02C74002091
64366+:1026D0001280FFDD240E000B000328803C1108014E
64367+:1026E000263194B400B148218D2D000001A00008CE
64368+:1026F000000000008F85003400A710219385003C66
64369+:10270000AF82003402251821A383003C951F00BC32
64370+:102710000226282137F91000A51900BC5240FF926B
64371+:10272000AF850060246A0004A38A003C950900BCC0
64372+:1027300024A40004AF84006035322000A51200BC40
64373+:102740000A001D54000020218F8600602CC800055F
64374+:102750001500FF609783005C3065FFFF00C5C8234C
64375+:102760002F2F000511E00003306400FF24CDFFFC93
64376+:1027700031A400FF8F8900648F920034113200046D
64377+:10278000389F000133EC0001158001380000000083
64378+:102790008F840038908700D434E60010A08600D4DF
64379+:1027A0008F8500388F8600609783005CACA000ECBA
64380+:1027B0000A001D2F306EFFFF8CB500848CB400849E
64381+:1027C0003C04100002A7302400068940328E007FAE
64382+:1027D000022E8025020410253C08800024050001FB
64383+:1027E00002602021240600010E0019A9AD02083064
64384+:1027F0000A001CC38F8500388C8200EC1222FE7EFA
64385+:102800000260202124090005A38900680E001A4AED
64386+:102810002411FFFF1451FE782404FFFF0A001D5508
64387+:102820002403FFFF8F8F004C8F8800388DF8000045
64388+:10283000AD1800888DE70010AD0700988F87006005
64389+:102840000A001DB78F8600542406FFFF118600057D
64390+:10285000000000000E001B4C026020210A001D8FAA
64391+:102860000040A0210E001AD1026020210A001D8F15
64392+:102870000040A0218F90004C3C0208008C4231B0F7
64393+:102880008E110010322C3FFF0182282B10A0000C6B
64394+:10289000240BFF808F85005090A3000D01637024EE
64395+:1028A00031CA00FF1140000702602021001143825D
64396+:1028B000310600032418000110D8010600000000B2
64397+:1028C000026020212403000D0E001A4AA383006831
64398+:1028D000004020218F8500380A001D320080A02191
64399+:1028E0008F90004C3C0A08008D4A31B08F85005013
64400+:1028F0008E0400100000A0218CB1001430823FFF34
64401+:10290000004A602B8CB200205180FFEE0260202133
64402+:1029100090B8000D240BFF800178702431C300FFB4
64403+:102920005060FFE80260202100044382310600036A
64404+:1029300014C0FFE40260202194BF001C8F9900386E
64405+:102940008E060028A73F00E88CAF0010022F20233E
64406+:1029500014C4013A026020218F83005400C368210F
64407+:10296000022D382B14E00136240200188F8A00440F
64408+:102970008F820030024390218D4B00100163702341
64409+:10298000AD4E0010AD5200208C4C00740192282BEB
64410+:1029900014A0015F026020218F8400508E08002463
64411+:1029A0008C86002411060007026020212419001CD7
64412+:1029B0000E001A4AA3990068240FFFFF104FFFC5AD
64413+:1029C0002404FFFF8F8400448C87002424FF00012F
64414+:1029D000AC9F00241251012F8F8D00308DB10074F7
64415+:1029E0001232012C3C0B00808E0E000001CB5024D3
64416+:1029F00015400075000000008E0300142411FFFF35
64417+:102A0000107100073C0808003C0608008CC6319095
64418+:102A100000C8C0241300015202602021A380006876
64419+:102A20008E0300003C19000100792024108000135F
64420+:102A30003C1F0080007FA02416800009020028218E
64421+:102A4000026020212411001A0E001A4AA391006886
64422+:102A50002407FFFF1047FF9F2404FFFF02002821E7
64423+:102A6000026020210E001A6A240600012410FFFFD4
64424+:102A70001050FF982404FFFF241400018F8D0044A0
64425+:102A8000026020210280302195A900342405000134
64426+:102A9000253200010E0019A9A5B200340000202142
64427+:102AA0008F8500380A001D320080A0218F90004CD5
64428+:102AB0003C1408008E9431B08E07001030E53FFFC3
64429+:102AC00000B4C82B132000618F8600502412FF80B1
64430+:102AD00090C9000D0249682431A400FF5080005CB9
64431+:102AE000026020218F8C00541180000700078B8228
64432+:102AF0008F8500388F82FCC094BF0080944A000A02
64433+:102B0000515F00F78F8600403227000314E0006415
64434+:102B100000000000920E000211C000D8000000006A
64435+:102B20008E0B0024156000D902602021920400035E
64436+:102B300024190002308500FF14B90005308900FF18
64437+:102B40008F940054128000EA240D002C308900FF7D
64438+:102B5000392C00102D8400012D3200010244302553
64439+:102B6000020028210E001A6A026020212410FFFFB3
64440+:102B7000105000BF8F8500388F830054106000D341
64441+:102B8000240500013C0A08008D4A318C0143F82BD2
64442+:102B900017E000B22402002D02602021000028214D
64443+:102BA0000E0019A9240600018F85003800001821A5
64444+:102BB0000A001D320060A0210E0018750000000000
64445+:102BC0000A001DF800000000AC8000200A001E78FA
64446+:102BD0008E03001400002821026020210E0019A994
64447+:102BE000240600010A001CC38F8500380A001DB7A7
64448+:102BF0008F8800388CAA00848CAC00843C031000C1
64449+:102C00000147F824001F91403189007F024968255F
64450+:102C100001A32825ACC50830910700012405000157
64451+:102C2000026020210E0019A930E600010A001CC331
64452+:102C30008F850038938F00482403FFFD0A001D3460
64453+:102C4000AF8F00600A001D342403FFFF02602021C3
64454+:102C50002410000D0E001A4AA390006800401821AD
64455+:102C60008F8500380A001D320060A0210E00187503
64456+:102C7000000000009783005C8F86006000402021E8
64457+:102C80003070FFFF00D010232C4A00051140FE11C8
64458+:102C90008F850038ACA400EC0A001D2F306EFFFFBA
64459+:102CA00090CF000D31E300085460FFA192040003AF
64460+:102CB00002602021240200100E001A4AA38200683C
64461+:102CC0002403FFFF5443FF9A920400030A001F12DB
64462+:102CD0008F85003890A4000D308F000811E000951A
64463+:102CE0008F990054572000A6026020218E1F000CEF
64464+:102CF0008CB4002057F40005026020218E0D0008DE
64465+:102D00008CA7002411A7003A026020212402002091
64466+:102D1000A38200680E001A4A2412FFFF1052FEED33
64467+:102D20002404FFFF8F9F00442402FFF73C14800E11
64468+:102D300093EA000D2419FF803C03800001423824EF
64469+:102D4000A3E7000D8F9F00303C0908008D2931ACAE
64470+:102D50008F8C006C97F200788F870044012C302113
64471+:102D6000324D7FFF000D204000C4782131E5007F07
64472+:102D700000B4C02101F94024AC68002CA711000068
64473+:102D80008CEB0028256E0001ACEE00288CEA002CAC
64474+:102D90008E02002C01426021ACEC002C8E09002C2C
64475+:102DA000ACE900308E120014ACF2003494ED003A1D
64476+:102DB00025A40001A4E4003A97E600783C1108003D
64477+:102DC0008E3131B024C3000130707FFF1211005CDE
64478+:102DD000006030218F8F0030026020212405000127
64479+:102DE0000E001934A5E600780A001EA1000020217B
64480+:102DF0008E0900142412FFFF1132006B8F8A0038F5
64481+:102E00008E0200188D4C00D0144C00650260202109
64482+:102E10008E0B00248CAE0028116E005B2402002172
64483+:102E20000E001A4AA38200681452FFBE2404FFFF5A
64484+:102E30008F8500380A001D320080A0212402001F67
64485+:102E40000E001A4AA38200682409FFFF1049FEA160
64486+:102E50002404FFFF0A001E548F83005402602021C7
64487+:102E60000E001A4AA38200681450FF508F85003864
64488+:102E70002403FFFF0A001D320060A0218CD800242B
64489+:102E80008E0800241118FF29026020210A001F2744
64490+:102E90002402000F8E0900003C05008001259024CB
64491+:102EA0001640FF492402001A026020210E001A4A2F
64492+:102EB000A3820068240CFFFF144CFECF2404FFFF04
64493+:102EC0008F8500380A001D320080A0210E001934C1
64494+:102ED000026020218F8500380A001EE500001821BD
64495+:102EE0002403FFFD0060A0210A001D32AF860060B0
64496+:102EF000026020210E001A4AA38D00682403FFFF00
64497+:102F00001043FF588F8500380A001ECC920400033E
64498+:102F10002418001D0E001A4AA39800682403FFFF1E
64499+:102F20001443FE9D2404FFFF8F8500380A001D32E4
64500+:102F30000080A021026020210A001F3D24020024FD
64501+:102F4000240880000068C024330BFFFF000B73C20D
64502+:102F500031D000FF001088270A001F6E001133C017
64503+:102F6000240F001B0E001A4AA38F00681451FEACF8
64504+:102F70002404FFFF8F8500380A001D320080A02145
64505+:102F80000A001F3D240200278E0600288CA3002C77
64506+:102F900010C30008026020210A001F812402001FC4
64507+:102FA0000A001F812402000E026020210A001F81F6
64508+:102FB000240200258E04002C1080000D8F8F00301D
64509+:102FC0008DE800740104C02B5700000C0260202122
64510+:102FD0008CB900140086A0210334282B10A0FF52C6
64511+:102FE0008F9F0044026020210A001F8124020022DA
64512+:102FF000026020210A001F81240200230A001F8191
64513+:103000002402002627BDFFD8AFB3001CAFB10014C7
64514+:10301000AFBF0020AFB20018AFB000103C0280007C
64515+:103020008C5201408C4B01483C048000000B8C0208
64516+:10303000322300FF317300FF8C8501B804A0FFFE2E
64517+:1030400034900180AE1200008C8701442464FFF0AC
64518+:10305000240600022C830013AE070004A61100080A
64519+:10306000A206000BAE1300241060004F8FBF00209B
64520+:10307000000448803C0A0801254A9534012A402171
64521+:103080008D04000000800008000000003C030800E0
64522+:103090008C6331A831693FFF00099980007280215B
64523+:1030A000021370212405FF80264D0100264C00806C
64524+:1030B0003C02800031B1007F3198007F31CA007F2F
64525+:1030C0003C1F800A3C1980043C0F800C01C5202461
64526+:1030D00001A5302401853824014F1821AC46002475
64527+:1030E000023F402103194821AC470090AC4400281E
64528+:1030F000AF830044AF880038AF8900300E0019005C
64529+:10310000016080213C0380008C6B01B80560FFFEEC
64530+:103110008F8700448F8600383465018090E8000D69
64531+:10312000ACB20000A4B0000600082600000416039C
64532+:1031300000029027001227C21080008124C200885C
64533+:10314000241F6082A4BF0008A0A000052402000282
64534+:10315000A0A2000B8F8B0030000424003C08270045
64535+:1031600000889025ACB20010ACA00014ACA00024E4
64536+:10317000ACA00028ACA0002C8D6900382413FF807F
64537+:10318000ACA9001890E3000D02638024320500FF13
64538+:1031900010A000058FBF002090ED000D31AC007F26
64539+:1031A000A0EC000D8FBF00208FB3001C8FB2001861
64540+:1031B0008FB100148FB000103C0A10003C0E80004C
64541+:1031C00027BD002803E00008ADCA01B8265F010052
64542+:1031D0002405FF8033F8007F3C06800003E5782457
64543+:1031E0003C19800A03192021ACCF0024908E00D412
64544+:1031F00000AE682431AC00FF11800024AF84003899
64545+:10320000248E008895CD00123C0C08008D8C31A8CE
64546+:1032100031AB3FFF01924821000B5180012A402130
64547+:1032200001052024ACC400283107007F3C06800C37
64548+:1032300000E620219083000D00A31024304500FFFC
64549+:1032400010A0FFD8AF8400449098000D330F0010F9
64550+:1032500015E0FFD58FBF00200E0019000000000010
64551+:103260003C0380008C7901B80720FFFE00000000BD
64552+:10327000AE1200008C7F0144AE1F0004A6110008AE
64553+:1032800024110002A211000BAE1300243C1308010C
64554+:10329000927396F0327000015200FFC38FBF00207E
64555+:1032A0000E002146024020210A0020638FBF00202B
64556+:1032B0003C1260008E452C083C03F0033462FFFF93
64557+:1032C00000A2F824AE5F2C088E582C083C1901C0CF
64558+:1032D00003199825AE532C080A0020638FBF0020E5
64559+:1032E000264D010031AF007F3C10800A240EFF8084
64560+:1032F00001F0282101AE60243C0B8000AD6C00245D
64561+:103300001660FFA8AF85003824110003A0B100FCAF
64562+:103310000A0020638FBF002026480100310A007F89
64563+:103320003C0B800A2409FF80014B30210109202435
64564+:103330003C078000ACE400240A002062AF8600381D
64565+:10334000944E0012320C3FFF31CD3FFF15ACFF7D94
64566+:10335000241F608290D900D42418FF800319782498
64567+:1033600031EA00FF1140FF7700000000240700044D
64568+:10337000A0C700FC8F870044241160842406000D40
64569+:10338000A4B10008A0A600050A00204D24020002F6
64570+:103390003C040001248496DC24030014240200FE73
64571+:1033A0003C010800AC2431EC3C010800AC2331E8BE
64572+:1033B0003C010801A42296F83C040801248496F8F4
64573+:1033C0000000182100643021A0C300042463000120
64574+:1033D0002C6500FF54A0FFFC006430213C0708006E
64575+:1033E00024E7010003E00008AF87007800A058211F
64576+:1033F000008048210000102114A00012000050217C
64577+:103400000A002142000000003C010801A42096F8B7
64578+:103410003C05080194A596F88F8200783C0C0801C1
64579+:10342000258C96F800E2182100AC2021014B302BAE
64580+:10343000A089000400001021A460000810C0003919
64581+:10344000010048218F8600780009384000E94021BA
64582+:103450000008388000E6282190A8000B90B9000AE7
64583+:103460000008204000881021000218800066C0215A
64584+:10347000A319000A8F85007800E5782191EE000AF3
64585+:1034800091E6000B000E684001AE6021000C208028
64586+:1034900000851021A046000B3C030801906396F2C2
64587+:1034A000106000222462FFFF8F8300383C01080176
64588+:1034B000A02296F2906C00FF118000040000000032
64589+:1034C000906E00FF25CDFFFFA06D00FF3C190801A5
64590+:1034D000973996F8272300013078FFFF2F0F00FF60
64591+:1034E00011E0FFC9254A00013C010801A42396F818
64592+:1034F0003C05080194A596F88F8200783C0C0801E1
64593+:10350000258C96F800E2182100AC2021014B302BCD
64594+:10351000A089000400001021A460000814C0FFC9A5
64595+:103520000100482103E000080000000003E000085B
64596+:103530002402000227BDFFE0248501002407FF804C
64597+:10354000AFB00010AFBF0018AFB1001400A718242F
64598+:103550003C10800030A4007F3C06800A00862821B1
64599+:103560008E110024AE03002490A200FF1440000836
64600+:10357000AF850038A0A000098FBF0018AE1100244D
64601+:103580008FB100148FB0001003E0000827BD0020A9
64602+:1035900090A900FD90A800FF312400FF0E0020F448
64603+:1035A000310500FF8F8500388FBF0018A0A00009EB
64604+:1035B000AE1100248FB100148FB0001003E000089A
64605+:1035C00027BD002027BDFFD0AFB20020AFB1001C47
64606+:1035D000AFB00018AFBF002CAFB40028AFB30024C9
64607+:1035E0003C0980009533011635320C00952F011AE5
64608+:1035F0003271FFFF023280218E08000431EEFFFF9E
64609+:10360000248B0100010E6821240CFF8025A5FFFFFB
64610+:10361000016C50243166007F3C07800AAD2A0024EB
64611+:1036200000C73021AF850074AF8800703C010801ED
64612+:10363000A02096F190C300090200D02100809821BB
64613+:10364000306300FF2862000510400048AF86003854
64614+:10365000286400021480008E24140001240D00054B
64615+:103660003C010801A02D96D590CC00FD3C0108013D
64616+:10367000A02096D63C010801A02096D790CB000A46
64617+:10368000240AFF80318500FF014B4824312700FFC9
64618+:1036900010E0000C000058213C12800836510080D8
64619+:1036A0008E2F00308CD0005C01F0702305C0018E9D
64620+:1036B0008F87007090D4000A3284007FA0C4000A73
64621+:1036C0008F8600383C118008363000808E0F003025
64622+:1036D0008F87007000EF702319C000EE000000001B
64623+:1036E00090D4000924120002328400FF1092024795
64624+:1036F000000000008CC2005800E2F82327F9FFFF09
64625+:103700001B2001300000000090C5000924080004BF
64626+:1037100030A300FF10680057240A00013C01080193
64627+:10372000A02A96D590C900FF252700013C01080179
64628+:10373000A02796D43C030801906396D52406000583
64629+:103740001066006A2C780005130000C40000902168
64630+:103750000003F8803C0408012484958003E4C82118
64631+:103760008F25000000A0000800000000241800FFC2
64632+:103770001078005C0000000090CC000A90CA00099C
64633+:103780003C080801910896F13187008000EA48253D
64634+:103790003C010801A02996DC90C500FD3C140801FD
64635+:1037A000929496F2311100013C010801A02596DDAA
64636+:1037B00090DF00FE3C010801A03F96DE90D200FFA2
64637+:1037C0003C010801A03296DF8CD900543C0108016D
64638+:1037D000AC3996E08CD000583C010801AC3096E43E
64639+:1037E0008CC3005C3C010801AC3496EC3C01080140
64640+:1037F000AC2396E8162000088FBF002C8FB4002859
64641+:103800008FB300248FB200208FB1001C8FB000183E
64642+:1038100003E0000827BD00303C1180009624010E13
64643+:103820000E000FD43094FFFF3C0B08018D6B96F413
64644+:103830000260382102802821AE2B01803C13080150
64645+:103840008E7396D401602021240600830E00102F71
64646+:10385000AFB300108FBF002C8FB400288FB30024AB
64647+:103860008FB200208FB1001C8FB0001803E0000859
64648+:1038700027BD00303C1808008F1831FC270F0001CD
64649+:103880003C010800AC2F31FC0A0021D700000000E9
64650+:103890001474FFB900000000A0C000FF3C05080040
64651+:1038A0008CA531E43C0308008C6331E03C02080045
64652+:1038B0008C4232048F99003834A80001241F000282
64653+:1038C0003C010801AC2396F43C010801A02896F0C5
64654+:1038D0003C010801A02296F3A33F00090A002190B1
64655+:1038E0008F8600380E002146000000000A0021D714
64656+:1038F0008F8600383C1F080193FF96D424190001DD
64657+:1039000013F902298F8700703C100801921096D895
64658+:103910003C06080190C696D610C000050200A02102
64659+:103920003C040801908496D9109001E48F870078B8
64660+:10393000001088408F9F0078023048210009C8801D
64661+:10394000033F702195D80008270F0001A5CF00087C
64662+:103950003C040801908496D93C05080190A596D6B0
64663+:103960000E0020F4000000008F8700780230202134
64664+:103970000004308000C720218C8500048F820074F1
64665+:1039800000A2402305020006AC8200048C8A0000DD
64666+:103990008F830070014310235C400001AC83000062
64667+:1039A0008F86003890CB00FF2D6C00025580002DD3
64668+:1039B000241400010230F821001F40800107282153
64669+:1039C00090B9000B8CAE00040019C0400319782197
64670+:1039D000000F1880006710218C4D000001AE882375
64671+:1039E0002630FFFF5E00001F241400018C440004F9
64672+:1039F0008CAA0000008A482319200019240E000414
64673+:103A00003C010801A02E96D590AD000B8CAB0004B4
64674+:103A1000000D8840022D80210010108000471021E9
64675+:103A20008C44000401646023058202009443000872
64676+:103A300090DF00FE90B9000B33E500FF54B900049D
64677+:103A40000107A021A0D400FE8F8700780107A021E4
64678+:103A50009284000B0E0020F4240500018F860038AC
64679+:103A600024140001125400962E500001160000424A
64680+:103A70003C08FFFF241900021659FF3F0000000018
64681+:103A8000A0C000FF8F860038A0D200090A0021D70D
64682+:103A90008F86003890C700092404000230E300FF3D
64683+:103AA0001064016F24090004106901528F880074AA
64684+:103AB0008CCE0054010E682325B10001062001754B
64685+:103AC000241800043C010801A03896D53C010801E7
64686+:103AD000A02096D490D400FD90D200FF2E4F00027B
64687+:103AE00015E0FF14328400FF000438408F8900780D
64688+:103AF00090DF00FF00E41021000220800089C8212F
64689+:103B00002FE500029324000B14A0FF0A24070002F3
64690+:103B100000041840006480210010588001692821A9
64691+:103B20008CAC0004010C50230540FF020000000093
64692+:103B30003C030801906396D614600005246F0001D1
64693+:103B40003C010801A02496D93C010801A02796D782
64694+:103B50003C010801A02F96D690CE00FF24E700017B
64695+:103B600031CD00FF01A7882B1220FFE990A4000BA4
64696+:103B70000A0021C6000000003C0508018CA596D46F
64697+:103B80003C12000400A8F82413F2000624020005E9
64698+:103B90003C090801912996D5152000022402000352
64699+:103BA000240200053C010801A02296F190C700FF05
64700+:103BB00014E0012024020002A0C200090A0021D75B
64701+:103BC0008F86003890CC00FF1180FEDA240A0001B5
64702+:103BD0008F8C00748F890078240F00030180682186
64703+:103BE0001160001E240E0002000540400105A021C6
64704+:103BF00000142080008990218E51000401918023BF
64705+:103C00000600FECC000000003C020801904296D65F
64706+:103C100014400005245800013C010801A02A96D751
64707+:103C20003C010801A02596D93C010801A03896D690
64708+:103C300090DF00FF010510210002C88033E500FF7E
64709+:103C4000254A00010329202100AA402B1500FEB9B6
64710+:103C50009085000B1560FFE50005404000054040E1
64711+:103C600001051821000310803C010801A02A96D408
64712+:103C70003C010801A02596D8004918218C64000455
64713+:103C800000E4F82327F9FFFF1F20FFE900000000F0
64714+:103C90008C63000000E358230560013A01A38823E8
64715+:103CA00010E301170184C0231B00FEA200000000E6
64716+:103CB0003C010801A02E96D50A002305240B000123
64717+:103CC000240E0004A0CE00093C0D08008DAD31F893
64718+:103CD0008F86003825A200013C010800AC2231F893
64719+:103CE0000A0021D7000000008CD9005C00F9C02335
64720+:103CF0001F00FE7B000000008CDF005C10FFFF65F2
64721+:103D00008F8400748CC3005C008340232502000173
64722+:103D10001C40FF60000000008CC9005C248700018B
64723+:103D200000E9282B10A0FE943C0D80008DAB01040F
64724+:103D30003C0C0001016C50241140FE8F2402001045
64725+:103D40003C010801A02296F10A0021D700000000E2
64726+:103D50008F9100748F86003826220001ACC2005C6F
64727+:103D60000A002292241400018F8700382404FF8067
64728+:103D70000000882190E9000A241400010124302564
64729+:103D8000A0E6000A3C05080190A596D63C0408016F
64730+:103D9000908496D90E0020F4000000008F86003831
64731+:103DA0008F85007890C800FD310700FF0007404074
64732+:103DB0000107F821001FC0800305C8219323000BD1
64733+:103DC000A0C300FD8F8500788F8600380305602131
64734+:103DD000918F000B000F704001CF6821000D808093
64735+:103DE000020510218C4B0000ACCB00548D840004E4
64736+:103DF0008F83007400645023194000022482000164
64737+:103E00002462000101074821ACC2005C0009308037
64738+:103E100000C5402100E02021240500010E0020F40F
64739+:103E20009110000B8F86003890C500FF10A0FF0C8A
64740+:103E3000001070408F85007801D06821000D10803F
64741+:103E4000004558218D6400008F8C0074018450233C
64742+:103E50002547000104E0FF02263100013C03080170
64743+:103E6000906396D62E2F0002247800013C010801B1
64744+:103E7000A03896D63C010801A03496D711E0FEF890
64745+:103E8000020038210A002365000740408F84003873
64746+:103E90008F8300748C85005800A340230502FE9A8E
64747+:103EA000AC8300580A00223B000000003C070801D8
64748+:103EB00090E796F2240200FF10E200BE8F860038E1
64749+:103EC0003C110801963196FA3C030801246396F8E8
64750+:103ED000262500013230FFFF30ABFFFF02036021D7
64751+:103EE0002D6A00FF1540008D918700043C010801F8
64752+:103EF000A42096FA8F88003800074840012728211F
64753+:103F0000911800FF000530802405000127140001EE
64754+:103F1000A11400FF3C120801925296F28F8800789B
64755+:103F20008F8E0070264F000100C820213C0108013F
64756+:103F3000A02F96F2AC8E00008F8D0074A48500082F
64757+:103F4000AC8D00043C030801906396D414600077A4
64758+:103F5000000090213C010801A02596D4A087000B09
64759+:103F60008F8C007800CC5021A147000A8F82003846
64760+:103F7000A04700FD8F840038A08700FE8F860038A0
64761+:103F80008F9F0070ACDF00548F990074ACD900583B
64762+:103F90008F8D00780127C02100185880016DA02165
64763+:103FA000928F000A000F704001CF18210003888013
64764+:103FB000022D8021A207000B8F8600780166602108
64765+:103FC000918A000B000A1040004A2021000428803A
64766+:103FD00000A64021A107000A3C07800834E90080C0
64767+:103FE0008D2200308F860038ACC2005C0A0022921D
64768+:103FF0002414000190CA00FF1540FEAD8F880074A4
64769+:10400000A0C400090A0021D78F860038A0C000FD97
64770+:104010008F98003824060001A30000FE3C0108012F
64771+:10402000A02696D53C010801A02096D40A0021C6FE
64772+:104030000000000090CB00FF3C040801908496F340
64773+:10404000316C00FF0184502B1540000F2402000347
64774+:1040500024020004A0C200090A0021D78F8600387C
64775+:1040600090C3000A2410FF8002035824316C00FF23
64776+:104070001180FDC1000000003C010801A02096D580
64777+:104080000A0021C600000000A0C200090A0021D7D2
64778+:104090008F86003890D4000A2412FF8002544824EE
64779+:1040A000312800FF1500FFF4240200083C0108013C
64780+:1040B000A02296F10A0021D70000000000108840DD
64781+:1040C0008F8B0070023018210003688001A7202127
64782+:1040D000AC8B00008F8A0074240C0001A48C0008B3
64783+:1040E000AC8A00043C05080190A596D62402000184
64784+:1040F00010A2FE1E24A5FFFF0A0022519084000B8F
64785+:104100000184A0231A80FD8B000000003C010801FF
64786+:10411000A02E96D50A002305240B00013C010801BE
64787+:10412000A42596FA0A0023B78F880038240B0001D3
64788+:10413000106B00228F9800388F85003890BF00FFE9
64789+:1041400033F900FF1079002B000000003C1F08012C
64790+:1041500093FF96D8001FC840033FC0210018A080DD
64791+:104160000288782191EE000AA08E000A8F8D0078D7
64792+:104170003C030801906396D800CD88210A0023DD16
64793+:10418000A223000B263000010600003101A4902379
64794+:104190000640002B240200033C010801A02F96D505
64795+:1041A0000A002305240B00018F8900380A00223BF6
64796+:1041B000AD2700540A00229124120001931400FD3F
64797+:1041C000A094000B8F8800388F8F0078910E00FE2E
64798+:1041D00000CF6821A1AE000A8F910038A22700FD10
64799+:1041E0008F8300708F900038AE0300540A0023DEE6
64800+:1041F0008F8D007890B000FEA090000A8F8B003861
64801+:104200008F8C0078916A00FD00CC1021A04A000B31
64802+:104210008F840038A08700FE8F8600748F85003859
64803+:10422000ACA600580A0023DE8F8D007894B80008F1
64804+:10423000ACA40004030378210A002285A4AF00087F
64805+:104240003C010801A02296D50A0021C6000000000A
64806+:1042500090CF0009240D000431EE00FF11CDFD8543
64807+:10426000240200013C010801A02296D50A0021C6C3
64808+:1042700000000000080033440800334408003420E4
64809+:10428000080033F4080033D8080033280800332826
64810+:10429000080033280800334C8008010080080080A3
64811+:1042A000800800005F865437E4AC62CC50103A4579
64812+:1042B00036621985BF14C0E81BC27A1E84F4B55655
64813+:1042C000094EA6FE7DDA01E7C04D748108005A74DC
64814+:1042D00008005AB808005A5C08005A5C08005A5C8A
64815+:1042E00008005A5C08005A7408005A5C08005A5CBE
64816+:1042F00008005AC008005A5C080059D408005A5CEB
64817+:1043000008005A5C08005AC008005A5C08005A5C51
64818+:1043100008005A5C08005A5C08005A5C08005A5CA5
64819+:1043200008005A5C08005A5C08005A5C08005A5C95
64820+:1043300008005A9408005A5C08005A9408005A5C15
64821+:1043400008005A5C08005A5C08005A9808005A9401
64822+:1043500008005A5C08005A5C08005A5C08005A5C65
64823+:1043600008005A5C08005A5C08005A5C08005A5C55
64824+:1043700008005A5C08005A5C08005A5C08005A5C45
64825+:1043800008005A5C08005A5C08005A5C08005A5C35
64826+:1043900008005A5C08005A5C08005A5C08005A5C25
64827+:1043A00008005A9808005A9808005A5C08005A9861
64828+:1043B00008005A5C08005A5C08005A5C08005A5C05
64829+:1043C00008005A5C08005A5C08005A5C08005A5CF5
64830+:1043D00008005A5C08005A5C08005A5C08005A5CE5
64831+:1043E00008005A5C08005A5C08005A5C08005A5CD5
64832+:1043F00008005A5C08005A5C08005A5C08005A5CC5
64833+:1044000008005A5C08005A5C08005A5C08005A5CB4
64834+:1044100008005A5C08005A5C08005A5C08005A5CA4
64835+:1044200008005A5C08005A5C08005A5C08005A5C94
64836+:1044300008005A5C08005A5C08005A5C08005A5C84
64837+:1044400008005A5C08005A5C08005A5C08005A5C74
64838+:1044500008005A5C08005A5C08005A5C08005A5C64
64839+:1044600008005A5C08005A5C08005A5C08005A5C54
64840+:1044700008005A5C08005A5C08005A5C08005A5C44
64841+:1044800008005A5C08005A5C08005A5C08005A5C34
64842+:1044900008005A5C08005A5C08005A5C08005A5C24
64843+:1044A00008005A5C08005A5C08005A5C08005A5C14
64844+:1044B00008005A5C08005A5C08005A5C08005A5C04
64845+:1044C00008005A5C08005A5C08005A5C08005ADC74
64846+:1044D0000800782C08007A900800783808007628C0
64847+:1044E00008007838080078C4080078380800762872
64848+:1044F0000800762808007628080076280800762824
64849+:104500000800762808007628080076280800762813
64850+:1045100008007628080078580800784808007628AF
64851+:1045200008007628080076280800762808007628F3
64852+:1045300008007628080076280800762808007628E3
64853+:1045400008007628080076280800762808007848B1
64854+:10455000080082FC08008188080082C40800818865
64855+:104560000800829408008070080081880800818813
64856+:1045700008008188080081880800818808008188F7
64857+:1045800008008188080081880800818808008188E7
64858+:104590000800818808008188080081B008008D34F7
64859+:1045A00008008E9008008E70080088D808008D4C96
64860+:1045B0000A00012400000000000000000000000DBF
64861+:1045C000747061362E322E31620000000602010145
64862+:1045D00000000000000000000000000000000000DB
64863+:1045E00000000000000000000000000000000000CB
64864+:1045F00000000000000000000000000000000000BB
64865+:1046000000000000000000000000000000000000AA
64866+:10461000000000000000000000000000000000009A
64867+:10462000000000000000000000000000000000008A
64868+:10463000000000000000000000000000000000007A
64869+:104640000000000010000003000000000000000D4A
64870+:104650000000000D3C020800244217203C03080023
64871+:1046600024632A10AC4000000043202B1480FFFD7F
64872+:10467000244200043C1D080037BD2FFC03A0F0219C
64873+:104680003C100800261004903C1C0800279C1720B2
64874+:104690000E000262000000000000000D2402FF80F6
64875+:1046A00027BDFFE000821024AFB00010AF42002011
64876+:1046B000AFBF0018AFB10014936500043084007FD1
64877+:1046C000034418213C0200080062182130A5002094
64878+:1046D000036080213C080111277B000814A0000220
64879+:1046E0002466005C2466005892020004974301048B
64880+:1046F000920400043047000F3063FFFF3084004015
64881+:10470000006728231080000900004821920200055C
64882+:1047100030420004104000050000000010A000031B
64883+:104720000000000024A5FFFC2409000492020005FB
64884+:1047300030420004104000120000000010A00010E1
64885+:10474000000000009602000200A72021010440257D
64886+:104750002442FFFEA7421016920300042402FF80A9
64887+:1047600000431024304200FF104000033C020400CC
64888+:104770000A000174010240258CC20000AF421018EB
64889+:104780008F4201780440FFFE2402000AA742014044
64890+:1047900096020002240400093042000700021023A0
64891+:1047A00030420007A7420142960200022442FFFE67
64892+:1047B000A7420144A740014697420104A74201488D
64893+:1047C0008F420108304200205040000124040001C3
64894+:1047D00092020004304200101440000234830010A2
64895+:1047E00000801821A743014A0000000000000000DB
64896+:1047F0000000000000000000AF48100000000000B2
64897+:104800000000000000000000000000008F421000C7
64898+:104810000441FFFE3102FFFF1040000700000000CE
64899+:1048200092020004304200401440000300000000E7
64900+:104830008F421018ACC20000960200063042FFFF03
64901+:10484000244200020002104300021040036288214B
64902+:10485000962200001120000D3044FFFF00A7102118
64903+:104860008F8300388F45101C0002108200021080D8
64904+:1048700000431021AC45000030A6FFFF0E00058D5F
64905+:1048800000052C0200402021A62200009203000413
64906+:104890002402FF8000431024304200FF1040001F1C
64907+:1048A0000000000092020005304200021040001B90
64908+:1048B000000000009742100C2442FFFEA742101691
64909+:1048C000000000003C02040034420030AF421000FF
64910+:1048D00000000000000000000000000000000000D8
64911+:1048E0008F4210000441FFFE000000009742100CB0
64912+:1048F0008F45101C3042FFFF24420030000210821E
64913+:1049000000021080005B1021AC45000030A6FFFFC4
64914+:104910000E00058D00052C02A62200009604000260
64915+:10492000248400080E0001E93084FFFF974401044D
64916+:104930000E0001F73084FFFF8FBF00188FB1001405
64917+:104940008FB000103C02100027BD002003E00008DB
64918+:10495000AF4201783084FFFF308200078F8500244A
64919+:1049600010400002248300073064FFF800A41021E7
64920+:1049700030421FFF03421821247B4000AF850028EE
64921+:10498000AF82002403E00008AF4200843084FFFFC0
64922+:104990003082000F8F85002C8F860034104000027B
64923+:1049A0002483000F3064FFF000A410210046182B70
64924+:1049B000AF8500300046202314600002AF82002C37
64925+:1049C000AF84002C8F82002C340480000342182115
64926+:1049D00000641821AF83003803E00008AF42008074
64927+:1049E0008F820014104000088F8200048F82FFDC49
64928+:1049F000144000058F8200043C02FFBF3442FFFFD9
64929+:104A0000008220248F82000430430006240200022A
64930+:104A10001062000F3C0201012C62000350400005AF
64931+:104A2000240200041060000F3C0200010A00023062
64932+:104A30000000000010620005240200061462000C51
64933+:104A40003C0201110A000229008210253C020011DB
64934+:104A500000821025AF421000240200010A0002303B
64935+:104A6000AF82000C00821025AF421000AF80000C16
64936+:104A700000000000000000000000000003E000084B
64937+:104A8000000000008F82000C1040000400000000B5
64938+:104A90008F4210000441FFFE0000000003E0000808
64939+:104AA000000000008F8200102443F800000231C291
64940+:104AB00024C2FFF02C6303011060000300021042C7
64941+:104AC0000A000257AC8200008F85001800C5102B29
64942+:104AD0001440000B0000182100C5102324470001DA
64943+:104AE0008F82001C00A210212442FFFF0046102BE1
64944+:104AF000544000042402FFFF0A000257AC87000064
64945+:104B00002402FFFF0A000260AC8200008C820000D9
64946+:104B10000002194000621821000318800062182169
64947+:104B2000000318803C0208002442175C0062182130
64948+:104B300003E000080060102127BDFFD8AFBF0020B0
64949+:104B4000AFB1001CAFB000183C0460088C8250006C
64950+:104B50002403FF7F3C066000004310243442380CDD
64951+:104B6000AC8250008CC24C1C3C1A80000002160221
64952+:104B70003042000F10400007AF82001C8CC34C1C59
64953+:104B80003C02001F3442FC0000621824000319C2DA
64954+:104B9000AF8300188F420008275B400034420001B9
64955+:104BA000AF420008AF8000243C02601CAF40008090
64956+:104BB000AF4000848C4500088CC308083402800094
64957+:104BC000034220212402FFF0006218243C020080EE
64958+:104BD0003C010800AC2204203C025709AF84003895
64959+:104BE00014620004AF850034240200010A0002921E
64960+:104BF000AF820014AF8000148F42000038420001E1
64961+:104C0000304200011440FFFC8F8200141040001657
64962+:104C10000000000097420104104000058F8300004F
64963+:104C2000146000072462FFFF0A0002A72C62000A3A
64964+:104C30002C620010504000048F83000024620001A9
64965+:104C4000AF8200008F8300002C62000A1440000332
64966+:104C50002C6200070A0002AEAF80FFDC10400002A9
64967+:104C600024020001AF82FFDC8F4301088F44010062
64968+:104C700030622000AF83000410400008AF840010B1
64969+:104C80003C0208008C42042C244200013C01080034
64970+:104C9000AC22042C0A00058A3C0240003065020068
64971+:104CA00014A0000324020F001482026024020D00ED
64972+:104CB00097420104104002C83C02400030624000AC
64973+:104CC000144000AD8F8200388C4400088F42017878
64974+:104CD0000440FFFE24020800AF42017824020008CD
64975+:104CE000A7420140A7400142974201048F8400047B
64976+:104CF0003051FFFF30820001104000070220802168
64977+:104D00002623FFFE240200023070FFFFA742014667
64978+:104D10000A0002DBA7430148A74001463C02080005
64979+:104D20008C42043C1440000D8F8300103082002020
64980+:104D30001440000224030009240300010060202124
64981+:104D40008F830010240209005062000134840004A3
64982+:104D5000A744014A0A0002F60000000024020F00E6
64983+:104D60001462000530820020144000062403000D68
64984+:104D70000A0002F524030005144000022403000980
64985+:104D800024030001A743014A3C0208008C4204208E
64986+:104D90003C0400480E00020C004420250E000235A1
64987+:104DA000000000008F82000C1040003E0000000058
64988+:104DB0008F4210003C0300200043102410400039B3
64989+:104DC0008F820004304200021040003600000000D4
64990+:104DD000974210141440003300000000974210085E
64991+:104DE0008F8800383042FFFF2442000600021882FC
64992+:104DF0000003388000E83021304300018CC40000FB
64993+:104E000010600004304200030000000D0A00033768
64994+:104E100000E81021544000103084FFFF3C05FFFFE4
64995+:104E200000852024008518260003182B0004102B71
64996+:104E300000431024104000050000000000000000A6
64997+:104E40000000000D00000000240002228CC20000BF
64998+:104E50000A000336004520253883FFFF0003182B86
64999+:104E60000004102B00431024104000050000000037
65000+:104E7000000000000000000D000000002400022BD4
65001+:104E80008CC200003444FFFF00E81021AC44000055
65002+:104E90003C0208008C420430244200013C0108001E
65003+:104EA000AC2204308F6200008F840038AF8200088B
65004+:104EB0008C8300003402FFFF1462000F00001021F9
65005+:104EC0003C0508008CA504543C0408008C84045064
65006+:104ED00000B0282100B0302B008220210086202144
65007+:104EE0003C010800AC2504543C010800AC240450EB
65008+:104EF0000A000580240400088C8200003042010072
65009+:104F00001040000F000010213C0508008CA5044C47
65010+:104F10003C0408008C84044800B0282100B0302BE9
65011+:104F200000822021008620213C010800AC25044C91
65012+:104F30003C010800AC2404480A0005802404000851
65013+:104F40003C0508008CA504443C0408008C84044003
65014+:104F500000B0282100B0302B0082202100862021C3
65015+:104F60003C010800AC2504443C010800AC2404408A
65016+:104F70000A000580240400088F6200088F62000088
65017+:104F800000021602304300F02402003010620005D7
65018+:104F900024020040106200E08F8200200A00058891
65019+:104FA0002442000114A000050000000000000000E1
65020+:104FB0000000000D00000000240002568F4201781E
65021+:104FC0000440FFFE000000000E00023D27A4001078
65022+:104FD0001440000500408021000000000000000D8A
65023+:104FE000000000002400025D8E0200001040000559
65024+:104FF00000000000000000000000000D00000000A4
65025+:10500000240002608F62000C0443000324020001AC
65026+:105010000A00042EAE000000AE0200008F820038AD
65027+:105020008C480008A20000078F65000C8F64000404
65028+:1050300030A3FFFF0004240200852023308200FFFC
65029+:105040000043102124420005000230832CC200815D
65030+:10505000A605000A14400005A20400040000000098
65031+:105060000000000D00000000240002788F85003849
65032+:105070000E0005AB260400148F6200048F43010864
65033+:10508000A60200083C02100000621824106000080C
65034+:105090000000000097420104920300072442FFEC45
65035+:1050A000346300023045FFFF0A0003C3A203000778
65036+:1050B000974201042442FFF03045FFFF96060008A6
65037+:1050C0002CC200135440000592030007920200070F
65038+:1050D00034420001A20200079203000724020001EB
65039+:1050E00010620005240200031062000B8F8200385A
65040+:1050F0000A0003E030C6FFFF8F8200383C04FFFF48
65041+:105100008C43000C0064182400651825AC43000C87
65042+:105110000A0003E030C6FFFF3C04FFFF8C43001091
65043+:105120000064182400651825AC43001030C6FFFF4A
65044+:1051300024C2000200021083A20200058F830038FF
65045+:10514000304200FF00021080004328218CA800009C
65046+:105150008CA2000024030004000217021443001272
65047+:1051600000000000974201043C03FFFF01031824E4
65048+:105170003042FFFF004610232442FFFE006240251C
65049+:10518000ACA8000092030005306200FF000210800E
65050+:1051900000501021904200143042000F00431021B3
65051+:1051A0000A000415A20200068CA400049742010420
65052+:1051B0009603000A3088FFFF3042FFFF00461023AD
65053+:1051C0002442FFD60002140001024025ACA80004CE
65054+:1051D000920200079204000524630028000318834C
65055+:1051E0000064182134420004A2030006A202000752
65056+:1051F0008F8200042403FFFB34420002004310248A
65057+:10520000AF820004920300068F87003800031880E5
65058+:10521000007010218C4400203C02FFF63442FFFF56
65059+:105220000082402400671821AE04000CAC68000C1A
65060+:10523000920500063C03FF7F8E02000C00052880CB
65061+:1052400000B020213463FFFF01033024948800263E
65062+:1052500000A7282100431024AE02000CAC860020D9
65063+:10526000AC880024ACA8001024020010A742014022
65064+:1052700024020002A7400142A7400144A742014680
65065+:10528000974201043C0400082442FFFEA742014863
65066+:10529000240200010E00020CA742014A9603000AF4
65067+:1052A0009202000400431021244200023042000711
65068+:1052B00000021023304200070E000235AE0200103B
65069+:1052C0008F6200003C0308008C6304442404001037
65070+:1052D000AF820008974201043042FFFF2442FFFEE4
65071+:1052E00000403821000237C33C0208008C420440D1
65072+:1052F000006718210067282B004610210045102167
65073+:105300003C010800AC2304443C010800AC220440EA
65074+:105310000A0005150000000014A0000500000000B0
65075+:10532000000000000000000D000000002400030A3F
65076+:105330008F4201780440FFFE000000000E00023D95
65077+:1053400027A4001414400005004080210000000044
65078+:105350000000000D00000000240003118E02000078
65079+:105360005440000692020007000000000000000DFB
65080+:10537000000000002400031C9202000730420004D9
65081+:10538000104000058F8200042403FFFB344200021A
65082+:1053900000431024AF8200048F620004044300081D
65083+:1053A00092020007920200068E03000CAE0000007D
65084+:1053B0000002108000501021AC4300209202000730
65085+:1053C00030420004544000099602000A920200058F
65086+:1053D0003C03000100021080005010218C46001890
65087+:1053E00000C33021AC4600189602000A9206000461
65088+:1053F000277100080220202100C2302124C60005A8
65089+:10540000260500140E0005AB00063082920400064B
65090+:105410008F6500043C027FFF000420800091202162
65091+:105420008C8300043442FFFF00A228240065182169
65092+:10543000AC8300049202000792040005920300046A
65093+:10544000304200041040001496070008308400FF2A
65094+:1054500000042080009120218C86000497420104E2
65095+:105460009605000A306300FF3042FFFF0043102121
65096+:105470000045102130E3FFFF004310232442FFD8F2
65097+:1054800030C6FFFF0002140000C23025AC860004C5
65098+:105490000A0004C992030007308500FF0005288038
65099+:1054A00000B128218CA4000097420104306300FF62
65100+:1054B0003042FFFF00431021004710233C03FFFF51
65101+:1054C000008320243042FFFF00822025ACA400008E
65102+:1054D0009203000724020001106200060000000091
65103+:1054E0002402000310620011000000000A0004EC16
65104+:1054F0008E03001097420104920300049605000AEF
65105+:105500008E24000C00431021004510212442FFF29C
65106+:105510003C03FFFF008320243042FFFF0082202550
65107+:10552000AE24000C0A0004EC8E0300109742010424
65108+:10553000920300049605000A8E24001000431021F7
65109+:10554000004510212442FFEE3C03FFFF008320248E
65110+:105550003042FFFF00822025AE2400108E03001091
65111+:105560002402000AA7420140A74301429603000A11
65112+:10557000920200043C04004000431021A742014471
65113+:10558000A740014697420104A742014824020001B6
65114+:105590000E00020CA742014A0E0002350000000076
65115+:1055A0008F6200009203000400002021AF820008F7
65116+:1055B000974201049606000A3042FFFF006218215C
65117+:1055C000006028213C0308008C6304443C0208006E
65118+:1055D0008C42044000651821004410210065382BDE
65119+:1055E000004710213C010800AC2304443C010800A2
65120+:1055F000AC22044092040004008620212484000A86
65121+:105600003084FFFF0E0001E9000000009744010410
65122+:105610003084FFFF0E0001F7000000003C02100084
65123+:10562000AF4201780A0005878F820020148200278C
65124+:105630003062000697420104104000673C024000BF
65125+:105640003062400010400005000000000000000033
65126+:105650000000000D00000000240004208F420178AB
65127+:105660000440FFFE24020800AF4201782402000833
65128+:10567000A7420140A74001428F82000497430104E2
65129+:1056800030420001104000073070FFFF2603FFFE8C
65130+:1056900024020002A7420146A74301480A00053F31
65131+:1056A0002402000DA74001462402000DA742014A32
65132+:1056B0008F62000024040008AF8200080E0001E998
65133+:1056C000000000000A0005190200202110400042DD
65134+:1056D0003C02400093620000304300F024020010BE
65135+:1056E0001062000524020070106200358F820020D5
65136+:1056F0000A000588244200018F62000097430104DC
65137+:105700003050FFFF3071FFFF8F4201780440FFFEF1
65138+:105710003202000700021023304200072403000A6F
65139+:105720002604FFFEA7430140A7420142A7440144CB
65140+:10573000A7400146A75101488F420108304200208E
65141+:10574000144000022403000924030001A743014A76
65142+:105750000E00020C3C0400400E0002350000000068
65143+:105760003C0708008CE70444021110212442FFFE8C
65144+:105770003C0608008CC604400040182100E3382194
65145+:10578000000010218F65000000E3402B00C2302193
65146+:105790002604000800C830213084FFFFAF850008D0
65147+:1057A0003C010800AC2704443C010800AC2604403E
65148+:1057B0000E0001E9000000000A0005190220202166
65149+:1057C0000E00013B000000008F82002024420001F7
65150+:1057D000AF8200203C024000AF4201380A00029232
65151+:1057E000000000003084FFFF30C6FFFF00052C00E2
65152+:1057F00000A628253882FFFF004510210045282BF0
65153+:105800000045102100021C023042FFFF004310211E
65154+:1058100000021C023042FFFF004310213842FFFF0C
65155+:1058200003E000083042FFFF3084FFFF30A5FFFF98
65156+:1058300000001821108000070000000030820001E5
65157+:105840001040000200042042006518210A0005A152
65158+:105850000005284003E000080060102110C0000689
65159+:1058600024C6FFFF8CA2000024A50004AC82000027
65160+:105870000A0005AB2484000403E0000800000000D7
65161+:1058800010A0000824A3FFFFAC8600000000000069
65162+:10589000000000002402FFFF2463FFFF1462FFFAF0
65163+:1058A0002484000403E00008000000000000000160
65164+:1058B0000A00002A00000000000000000000000DA7
65165+:1058C000747870362E322E3162000000060201001C
65166+:1058D00000000000000001360000EA600000000047
65167+:1058E00000000000000000000000000000000000B8
65168+:1058F00000000000000000000000000000000000A8
65169+:105900000000000000000000000000000000000097
65170+:105910000000001600000000000000000000000071
65171+:105920000000000000000000000000000000000077
65172+:105930000000000000000000000000000000000067
65173+:1059400000000000000000000000138800000000BC
65174+:10595000000005DC00000000000000001000000353
65175+:10596000000000000000000D0000000D3C020800D7
65176+:1059700024423D683C0308002463401CAC40000006
65177+:105980000043202B1480FFFD244200043C1D08002E
65178+:1059900037BD7FFC03A0F0213C100800261000A8B2
65179+:1059A0003C1C0800279C3D680E00044E00000000CF
65180+:1059B0000000000D27BDFFB4AFA10000AFA200049E
65181+:1059C000AFA30008AFA4000CAFA50010AFA6001451
65182+:1059D000AFA70018AFA8001CAFA90020AFAA0024F1
65183+:1059E000AFAB0028AFAC002CAFAD0030AFAE003491
65184+:1059F000AFAF0038AFB8003CAFB90040AFBC004417
65185+:105A0000AFBF00480E000591000000008FBF0048A6
65186+:105A10008FBC00448FB900408FB8003C8FAF003876
65187+:105A20008FAE00348FAD00308FAC002C8FAB0028D0
65188+:105A30008FAA00248FA900208FA8001C8FA7001810
65189+:105A40008FA600148FA500108FA4000C8FA3000850
65190+:105A50008FA200048FA1000027BD004C3C1B6004F6
65191+:105A60008F7A5030377B502803400008AF7A00000F
65192+:105A70008F86003C3C0390003C0280000086282575
65193+:105A800000A32025AC4400203C0380008C6700204C
65194+:105A900004E0FFFE0000000003E00008000000003A
65195+:105AA0000A000070240400018F85003C3C04800043
65196+:105AB0003483000100A3102503E00008AC8200201D
65197+:105AC00003E00008000010213084FFFF30A5FFFF35
65198+:105AD00010800007000018213082000110400002F1
65199+:105AE00000042042006518211480FFFB00052840B7
65200+:105AF00003E000080060102110C000070000000053
65201+:105B00008CA2000024C6FFFF24A50004AC82000084
65202+:105B100014C0FFFB2484000403E000080000000020
65203+:105B200010A0000824A3FFFFAC86000000000000C6
65204+:105B3000000000002402FFFF2463FFFF1462FFFA4D
65205+:105B40002484000403E000080000000090AA003153
65206+:105B50008FAB00108CAC00403C0300FF8D6800044C
65207+:105B6000AD6C00208CAD004400E060213462FFFF8A
65208+:105B7000AD6D00248CA700483C09FF000109C0243A
65209+:105B8000AD6700288CAE004C0182C824031978252B
65210+:105B9000AD6F0004AD6E002C8CAD0038314A00FFB3
65211+:105BA000AD6D001C94A900323128FFFFAD680010D4
65212+:105BB00090A70030A5600002A1600004A16700006A
65213+:105BC00090A30032306200FF0002198210600005CD
65214+:105BD000240500011065000E0000000003E000082D
65215+:105BE000A16A00018CD80028354A0080AD780018E1
65216+:105BF0008CCF0014AD6F00148CCE0030AD6E000859
65217+:105C00008CC4002CA16A000103E00008AD64000C04
65218+:105C10008CCD001CAD6D00188CC90014AD6900144A
65219+:105C20008CC80024AD6800088CC70020AD67000C4C
65220+:105C30008CC200148C8300700043C82B1320000713
65221+:105C4000000000008CC20014144CFFE400000000AF
65222+:105C5000354A008003E00008A16A00018C820070D0
65223+:105C60000A0000E6000000009089003027BDFFF820
65224+:105C70008FA8001CA3A900008FA300003C0DFF808B
65225+:105C800035A2FFFF8CAC002C00625824AFAB0000A3
65226+:105C9000A100000400C05821A7A000028D06000446
65227+:105CA00000A048210167C8218FA500000080502175
65228+:105CB0003C18FF7F032C20263C0E00FF2C8C00019B
65229+:105CC000370FFFFF35CDFFFF3C02FF0000AFC824B8
65230+:105CD00000EDC02400C27824000C1DC003236825F9
65231+:105CE00001F87025AD0D0000AD0E00048D240024D8
65232+:105CF000AFAD0000AD0400088D2C00202404FFFF90
65233+:105D0000AD0C000C9547003230E6FFFFAD060010E9
65234+:105D10009145004830A200FF000219C25060000106
65235+:105D20008D240034AD0400148D4700388FAA00186C
65236+:105D300027BD0008AD0B0028AD0A0024AD07001CEC
65237+:105D4000AD00002CAD00001803E00008AD000020FD
65238+:105D500027BDFFE0AFB20018AFB10014AFB0001024
65239+:105D6000AFBF001C9098003000C088213C0D00FFA0
65240+:105D7000330F007FA0CF0000908E003135ACFFFFC5
65241+:105D80003C0AFF00A0CE000194A6001EA220000441
65242+:105D90008CAB00148E29000400A08021016C282403
65243+:105DA000012A40240080902101052025A62600021A
65244+:105DB000AE24000426050020262400080E000092D0
65245+:105DC00024060002924700302605002826240014ED
65246+:105DD00000071E000003160324060004044000030D
65247+:105DE0002403FFFF965900323323FFFF0E00009279
65248+:105DF000AE230010262400248FBF001C8FB2001891
65249+:105E00008FB100148FB00010240500030000302172
65250+:105E10000A00009C27BD002027BDFFD8AFB1001CA1
65251+:105E2000AFB00018AFBF002090A9003024020001DD
65252+:105E300000E050213123003F00A040218FB00040FE
65253+:105E40000080882100C04821106200148FA700380C
65254+:105E5000240B000500A0202100C02821106B001396
65255+:105E6000020030210E000128000000009225007C75
65256+:105E700030A400021080000326030030AE00003082
65257+:105E8000260300348FBF00208FB1001C8FB0001894
65258+:105E90000060102103E0000827BD00280E0000A7C5
65259+:105EA000AFB000100A00016F000000008FA3003C9B
65260+:105EB000010020210120282101403021AFA3001042
65261+:105EC0000E0000EEAFB000140A00016F00000000E9
65262+:105ED0003C06800034C20E008C4400108F850044C4
65263+:105EE000ACA400208C43001803E00008ACA30024FD
65264+:105EF0003C06800034C20E008C4400148F850044A0
65265+:105F0000ACA400208C43001C03E00008ACA30024D8
65266+:105F10009382000C1040001B2483000F2404FFF028
65267+:105F20000064382410E00019978B00109784000E4D
65268+:105F30009389000D3C0A601C0A0001AC01644023F7
65269+:105F400001037021006428231126000231C2FFFFE3
65270+:105F500030A2FFFF0047302B50C0000E00E4482164
65271+:105F60008D4D000C31A3FFFF00036400000C2C03D7
65272+:105F700004A1FFF30000302130637FFF0A0001A479
65273+:105F80002406000103E00008000000009784000ED2
65274+:105F900000E448213123FFFF3168FFFF0068382B00
65275+:105FA00054E0FFF8A783000E938A000D114000050E
65276+:105FB000240F0001006BC023A380000D03E0000844
65277+:105FC000A798000E006BC023A38F000D03E000080C
65278+:105FD000A798000E03E000080000000027BDFFE8BE
65279+:105FE000AFB000103C10800036030140308BFFFF43
65280+:105FF00093AA002BAFBF0014A46B000436040E005C
65281+:106000009488001630C600FF8FA90030A4680006EF
65282+:10601000AC650008A0660012A46A001AAC670020F4
65283+:106020008FA5002CA4690018012020210E000198E2
65284+:10603000AC6500143C021000AE0201788FBF001462
65285+:106040008FB0001003E0000827BD00188F85000006
65286+:106050002484000727BDFFF83084FFF83C06800049
65287+:1060600094CB008A316AFFFFAFAA00008FA900001D
65288+:10607000012540232507FFFF30E31FFF0064102B9D
65289+:106080001440FFF700056882000D288034CC4000E2
65290+:1060900000AC102103E0000827BD00088F8200003B
65291+:1060A0002486000730C5FFF800A2182130641FFFC6
65292+:1060B00003E00008AF8400008F87003C8F84004419
65293+:1060C00027BDFFB0AFB70044AFB40038AFB1002C6C
65294+:1060D000AFBF0048AFB60040AFB5003CAFB300342F
65295+:1060E000AFB20030AFB000283C0B80008C8600249B
65296+:1060F000AD6700808C8A002035670E00356901008D
65297+:10610000ACEA00108C8800248D2500040000B82122
65298+:10611000ACE800188CE3001000A688230000A02142
65299+:10612000ACE300148CE20018ACE2001C122000FE6C
65300+:1061300000E0B021936C0008118000F40000000022
65301+:10614000976F001031EEFFFF022E682B15A000EFB5
65302+:1061500000000000977200103250FFFFAED0000028
65303+:106160003C0380008C740000329300081260FFFD35
65304+:106170000000000096D800088EC700043305FFFF1A
65305+:1061800030B5000112A000E4000000000000000D86
65306+:1061900030BFA0402419004013F9011B30B4A00007
65307+:1061A000128000DF000000009373000812600008F6
65308+:1061B00000000000976D001031ACFFFF00EC202BB9
65309+:1061C0001080000330AE004011C000D50000000078
65310+:1061D000A7850040AF87003893630008022028217C
65311+:1061E000AFB10020146000F527B40020AF60000CB0
65312+:1061F000978F004031F14000162000022403001662
65313+:106200002403000E24054007A363000AAF650014B1
65314+:10621000938A00428F70001431550001001512401E
65315+:1062200002024825AF690014979F00408F78001440
65316+:1062300033F9001003194025AF680014979200400D
65317+:106240003247000810E0016E000000008F67001464
65318+:106250003C1210003C11800000F27825AF6F001452
65319+:1062600036230E00946E000A3C0D81002406000EB9
65320+:1062700031CCFFFF018D2025AF640004A36600022E
65321+:106280009373000A3406FFFC266B0004A36B000A1C
65322+:1062900097980040330820001100015F00000000C3
65323+:1062A0003C05800034A90E00979900409538000CF9
65324+:1062B00097870040001940423312C00031030003A9
65325+:1062C00000127B0330F11000006F6825001172038B
65326+:1062D00001AE6025000C20C0A76400129793004017
65327+:1062E000936A000A001359823175003C02AA1021FA
65328+:1062F0002450003CA3700009953F000C33F93FFF88
65329+:10630000A779001097700012936900090130F821F5
65330+:1063100027E5000230B900070019C0233308000741
65331+:10632000A368000B9371000997720012976F001019
65332+:10633000322700FF8F910038978D004000F218211E
65333+:10634000006F702101C6602131A6004010C0000519
65334+:106350003185FFFF00B1102B3C1280001040001768
65335+:10636000000098210225A82B56A0013E8FA50020F1
65336+:106370003C048000348A0E008D5300143C068000DB
65337+:10638000AD5300108D4B001CAD4B0018AD45000007
65338+:106390008CCD000031AC00081180FFFD34CE0E0022
65339+:1063A00095C3000800A0882100009021A783004029
65340+:1063B0008DC6000424130001AF860038976F0010CB
65341+:1063C00031F5FFFF8E9F000003F1282310A0011F6D
65342+:1063D000AE85000093620008144000DD000000005C
65343+:1063E0000E0001E7240400108F900048004028218F
65344+:1063F0003C023200320600FF000654000142F8253C
65345+:1064000026090001AF890048ACBF0000937900095C
65346+:1064100097780012936F000A332800FF3303FFFFC1
65347+:106420000103382100076C0031EE00FF01AE60254A
65348+:10643000ACAC00048F840048978B0040316A200088
65349+:106440001140010AACA4000897640012308BFFFFD2
65350+:1064500006400108ACAB000C978E004031C5000827
65351+:1064600014A0000226280006262800023C1F8000F7
65352+:1064700037E70E0094F900148CE5001C8F670004C8
65353+:10648000937800023324FFFF330300FFAFA3001013
65354+:106490008F6F0014AFA800180E0001CBAFAF00142F
65355+:1064A000240400100E0001FB000000008E9200008A
65356+:1064B00016400005000000008F7800142403FFBF81
65357+:1064C0000303A024AF7400148F67000C00F5C821EB
65358+:1064D000AF79000C9375000816A0000800000000BA
65359+:1064E00012600006000000008F6800143C0AEFFFF5
65360+:1064F0003549FFFE0109F824AF7F0014A37300089B
65361+:106500008FA500200A00034F02202021AED10000F9
65362+:106510000A00022D3C03800014E0FF1E30BFA040A3
65363+:106520000E0001900000A0212E9100010237B0253D
65364+:1065300012C000188FBF00488F87003C24170F003F
65365+:1065400010F700D43C0680008CD901780720FFFEAC
65366+:10655000241F0F0010FF00F634CA0E008D560014E1
65367+:1065600034C7014024080240ACF600048D49001CE9
65368+:106570003C141000ACE90008A0E00012A4E0001AEE
65369+:10658000ACE00020A4E00018ACE80014ACD4017822
65370+:106590008FBF00488FB700448FB600408FB5003CD6
65371+:1065A0008FB400388FB300348FB200308FB1002C1D
65372+:1065B0008FB0002803E0000827BD00508F910038FD
65373+:1065C000978800403C1280000220A821310700403B
65374+:1065D00014E0FF7C00009821977900108F9200381A
65375+:1065E0003338FFFF131200A8000020210080A021F3
65376+:1065F000108000F300A088211620FECE00000000CD
65377+:106600000A00031F2E9100013C0380008C62017878
65378+:106610000440FFFE240808008F860000AC68017863
65379+:106620003C038000946D008A31ACFFFF0186582343
65380+:10663000256AFFFF31441FFF2C8900081520FFF950
65381+:10664000000000008F8F0048347040008F83003CB2
65382+:1066500000E0A021240E0F0025E70001AF870048CD
65383+:1066600000D03021023488233C08800031F500FF3F
65384+:10667000106E0005240700019398004233130001B7
65385+:106680000013924036470001001524003C0A010027
65386+:10669000008A4825ACC900008F82004830BF003610
65387+:1066A00030B90008ACC200041320009900FF9825FF
65388+:1066B00035120E009650000A8F8700003C0F8100B3
65389+:1066C0003203FFFF24ED000835060140006F60250E
65390+:1066D0003C0E100031AB1FFF269200062405000E71
65391+:1066E000ACCC0020026E9825A4C5001AAF8B000028
65392+:1066F000A4D20018162000083C1080008F89003CAE
65393+:1067000024020F00512200022417000136730040BA
65394+:106710000E0001883C10800036060E008CCB001461
65395+:10672000360A014002402021AD4B00048CC5001CFC
65396+:10673000AD450008A1550012AD5300140E0001989C
65397+:106740003C151000AE1501780A000352000000004D
65398+:10675000936F0009976E0012936D000B31E500FFF7
65399+:1067600000AE202131AC00FF008C80212602000AFF
65400+:106770003050FFFF0E0001E7020020218F86004805
65401+:106780003C0341003C05800024CB0001AF8B004856
65402+:10679000936A00099769001230C600FF315F00FF5D
65403+:1067A0003128FFFF03E8382124F900020006C40065
65404+:1067B0000319782501E37025AC4E00008F6D000CA5
65405+:1067C00034A40E00948B001401B26025AC4C00047C
65406+:1067D0008C85001C8F670004936A00023164FFFF00
65407+:1067E000314900FFAFA900108F680014AFB1001845
65408+:1067F0000E0001CBAFA800140A0002FD0200202108
65409+:10680000AF600004A36000029798004033082000A6
65410+:106810001500FEA300003021A760001297840040FD
65411+:10682000936B000A3C10800030931F0000135183CB
65412+:10683000014BA82126A20028A362000936090E00F8
65413+:10684000953F000C0A000295A77F00108F7000147E
65414+:10685000360900400E000188AF6900140A0002C921
65415+:10686000000000000A00034F000020210641FEFA4C
65416+:10687000ACA0000C8CAC000C3C0D8000018D902570
65417+:106880000A0002EAACB2000C000090210A0002C526
65418+:1068900024130001128000073C028000344B0E00DC
65419+:1068A0009566000830D300401260004900000000E7
65420+:1068B0003C0680008CD001780600FFFE34C50E0037
65421+:1068C00094B500103C03050034CC014032B8FFFF02
65422+:1068D00003039025AD92000C8CAF0014240D200012
65423+:1068E0003C041000AD8F00048CAE001CAD8E00087F
65424+:1068F000A1800012A580001AAD800020A58000189C
65425+:10690000AD8D0014ACC401780A0003263C0680005B
65426+:106910008F9F0000351801402692000227F90008D9
65427+:1069200033281FFFA71200180A000391AF88000048
65428+:106930003C02800034450140ACA0000C1280001BDA
65429+:1069400034530E0034510E008E370010ACB70004E3
65430+:106950008E2400183C0B8000ACA400083570014068
65431+:1069600024040040A20000128FBF0048A600001AB5
65432+:106970008FB70044AE0000208FB60040A60000187C
65433+:106980008FB5003CAE0400148FB400388FB30034D0
65434+:106990008FB200308FB1002C8FB000283C02100065
65435+:1069A00027BD005003E00008AD6201788E66001438
65436+:1069B000ACA600048E64001C0A00042A3C0B800074
65437+:1069C0000E0001902E9100010A0003200237B0252D
65438+:1069D000000000000000000D00000000240003691A
65439+:1069E0000A0004013C06800027BDFFD8AFBF00208D
65440+:1069F0003C0980003C1F20FFAFB200183C0760003C
65441+:106A000035320E002402001037F9FFFDACE23008E9
65442+:106A1000AFB3001CAFB10014AFB00010AE5900000E
65443+:106A20000000000000000000000000000000000066
65444+:106A3000000000003C1800FF3713FFFDAE530000BC
65445+:106A40003C0B60048D7050002411FF7F3C0E00024F
65446+:106A50000211782435EC380C35CD0109ACED4C1819
65447+:106A6000240A0009AD6C50008CE80438AD2A0008F7
65448+:106A7000AD2000148CE54C1C3106FFFF38C42F718B
65449+:106A800000051E023062000F2486C0B310400007CC
65450+:106A9000AF8200088CE54C1C3C09001F3528FC0027
65451+:106AA00000A81824000321C2AF8400048CF1080858
65452+:106AB0003C0F57092412F0000232702435F0001008
65453+:106AC00001D0602601CF68262DAA00012D8B000180
65454+:106AD000014B382550E00009A380000C3C1F601CCE
65455+:106AE0008FF8000824190001A399000C33137C00CF
65456+:106AF000A7930010A780000EA380000DAF80004870
65457+:106B000014C00003AF8000003C066000ACC0442C01
65458+:106B10000E0005B93C1080000E000F1A361101005E
65459+:106B20003C12080026523DD03C13080026733E500C
65460+:106B30008E03000038640001308200011440FFFC25
65461+:106B40003C0B800A8E2600002407FF8024C90240E7
65462+:106B5000312A007F014B402101272824AE06002066
65463+:106B6000AF880044AE0500243C048000AF86003CA2
65464+:106B70008C8C01780580FFFE24180800922F0008F5
65465+:106B8000AC980178A38F0042938E004231CD000172
65466+:106B900011A0000F24050D0024DFF8002FF90301D8
65467+:106BA0001320001C000629C224A4FFF00004104298
65468+:106BB000000231400E00020200D2D8213C02400007
65469+:106BC0003C068000ACC201380A0004A000000000AE
65470+:106BD00010C50023240D0F0010CD00273C1F800896
65471+:106BE00037F9008093380000240E0050330F00FF67
65472+:106BF00015EEFFF33C0240000E000A3600000000D4
65473+:106C00003C0240003C068000ACC201380A0004A0EF
65474+:106C1000000000008F83000400A3402B1500000B30
65475+:106C20008F8B0008006B50212547FFFF00E5482BA4
65476+:106C30001520000600A36023000C19400E0002027C
65477+:106C40000073D8210A0004C43C0240000000000D7B
65478+:106C50000E000202000000000A0004C43C024000D2
65479+:106C60003C1B0800277B3F500E0002020000000082
65480+:106C70000A0004C43C0240003C1B0800277B3F7014
65481+:106C80000E000202000000000A0004C43C024000A2
65482+:106C90003C0660043C09080025290104ACC9502CBD
65483+:106CA0008CC850003C0580003C0200023507008083
65484+:106CB000ACC750003C040800248415A43C03080021
65485+:106CC0002463155CACA50008ACA2000C3C010800D4
65486+:106CD000AC243D603C010800AC233D6403E00008A7
65487+:106CE0002402000100A030213C1C0800279C3D68C4
65488+:106CF0003C0C04003C0B0002008B3826008C402624
65489+:106D00002CE200010007502B2D050001000A4880ED
65490+:106D10003C03080024633D60004520250123182121
65491+:106D20001080000300001021AC6600002402000166
65492+:106D300003E00008000000003C1C0800279C3D68A0
65493+:106D40003C0B04003C0A0002008A3026008B3826E7
65494+:106D50002CC200010006482B2CE5000100094080F0
65495+:106D60003C03080024633D600045202501031821F1
65496+:106D700010800005000010213C0C0800258C155CDB
65497+:106D8000AC6C00002402000103E0000800000000D9
65498+:106D90003C0900023C08040000883026008938269F
65499+:106DA0002CC30001008028212CE400010083102561
65500+:106DB0001040000B000030213C1C0800279C3D685F
65501+:106DC0003C0A80008D4E00082406000101CA682597
65502+:106DD000AD4D00088D4C000C01855825AD4B000CC5
65503+:106DE00003E0000800C010213C1C0800279C3D68FF
65504+:106DF0003C0580008CA6000C000420272402000122
65505+:106E000000C4182403E00008ACA3000C3C020002FC
65506+:106E10001082000B3C0560003C0704001087000353
65507+:106E20000000000003E00008000000008CA908D06A
65508+:106E3000240AFFFD012A402403E00008ACA808D082
65509+:106E40008CA408D02406FFFE0086182403E0000866
65510+:106E5000ACA308D03C05601A34A600108CC3008097
65511+:106E600027BDFFF88CC50084AFA3000093A40000E9
65512+:106E70002402000110820003AFA5000403E0000813
65513+:106E800027BD000893A7000114E0001497AC00028E
65514+:106E900097B800023C0F8000330EFFFC01CF682141
65515+:106EA000ADA50000A3A000003C0660008CC708D080
65516+:106EB0002408FFFE3C04601A00E82824ACC508D072
65517+:106EC0008FA300048FA200003499001027BD000892
65518+:106ED000AF22008003E00008AF2300843C0B800059
65519+:106EE000318AFFFC014B48218D2800000A00057DF6
65520+:106EF000AFA8000427BDFFE8AFBF00103C1C08008E
65521+:106F0000279C3D683C0580008CA4000C8CA20004EA
65522+:106F10003C0300020044282410A0000A00A3182407
65523+:106F20003C0604003C0400021460000900A6102482
65524+:106F30001440000F3C0404000000000D3C1C08003D
65525+:106F4000279C3D688FBF001003E0000827BD001894
65526+:106F50003C0208008C423D600040F809000000003F
65527+:106F60003C1C0800279C3D680A0005A68FBF001046
65528+:106F70003C0208008C423D640040F809000000001B
65529+:106F80000A0005AC00000000000411C003E0000886
65530+:106F9000244202403C04080024843FB42405001A23
65531+:106FA0000A00009C0000302127BDFFE0AFB00010B8
65532+:106FB0003C108000AFBF0018AFB1001436110100C3
65533+:106FC000922200090E0005B63044007F8E3F00007B
65534+:106FD0008F89003C3C0F008003E26021258800403F
65535+:106FE0000049F821240DFF80310E00783198007897
65536+:106FF00035F9000135F100020319382501D1482582
65537+:10700000010D302403ED5824018D2824240A00406A
65538+:1070100024040080240300C0AE0B0024AE0008103E
65539+:10702000AE0A0814AE040818AE03081CAE05080426
65540+:10703000AE070820AE060808AE0908243609090084
65541+:107040009539000C3605098033ED007F3338FFFF9A
65542+:10705000001889C0AE110800AE0F0828952C000C4E
65543+:107060008FBF00188FB10014318BFFFF000B51C090
65544+:10707000AE0A002C8CA400508FB000108CA3003CF2
65545+:107080008D2700048CA8001C8CA600383C0E800ABA
65546+:1070900001AE102127BD0020AF820044AF84005014
65547+:1070A000AF830054AF87004CAF88005C03E000085A
65548+:1070B000AF8600603C09080091293FD924A800024E
65549+:1070C0003C05110000093C0000E8302500C51825EA
65550+:1070D00024820008AC83000003E00008AC800004B8
65551+:1070E0003C098000352309009128010B906A0011AA
65552+:1070F0002402002800804821314700FF00A07021B1
65553+:1071000000C068213108004010E20002340C86DD26
65554+:10711000240C08003C0A800035420A9A944700007B
65555+:10712000354B0A9C35460AA030F9FFFFAD39000007
65556+:107130008D780000354B0A8024040001AD3800042E
65557+:107140008CCF0000AD2F00089165001930A300031B
65558+:107150001064009028640002148000AF240500022F
65559+:107160001065009E240F0003106F00B435450AA47B
65560+:10717000240A0800118A0048000000005100003D68
65561+:107180003C0B80003C0480003483090090670012AF
65562+:1071900030E200FF004D7821000FC8802724000155
65563+:1071A0003C0A8000354F090091E50019354C0980F3
65564+:1071B0008D87002830A300FF0003150000475825E5
65565+:1071C0000004C4003C19600001793025370806FF2F
65566+:1071D000AD260000AD2800048DEA002C25280028EB
65567+:1071E000AD2A00088DEC0030AD2C000C8DE500348C
65568+:1071F000AD2500108DE400383C05800034AC093C1E
65569+:10720000AD2400148DE3001CAD2300188DE7002091
65570+:10721000AD27001C8DE20024AD2200208DF900284E
65571+:1072200034A20100AD3900248D830000AD0E0004AE
65572+:1072300034B90900AD0300008C47000C250200148E
65573+:10724000AD070008932B00123C04080090843FD83F
65574+:10725000AD000010317800FF030D302100064F0013
65575+:1072600000047C00012F702535CDFFFC03E00008F1
65576+:10727000AD0D000C35780900930600123C0508009E
65577+:1072800094A53FC830C800FF010D5021000A60805E
65578+:107290000A00063C018520211500005B000000006B
65579+:1072A0003C08080095083FCE3C06080094C63FC83D
65580+:1072B000010610213C0B800035790900933800113C
65581+:1072C000932A001935660A80330800FF94CF002AFC
65582+:1072D00000086082314500FF978A0058000C1E00AC
65583+:1072E000000524003047FFFF006410250047C0253B
65584+:1072F00001EA30213C0B4000030B402500066400EE
65585+:10730000AD280000AD2C0004932500183C030006B6
65586+:107310002528001400053E0000E31025AD220008DA
65587+:107320008F24002C3C05800034AC093CAD24000CBB
65588+:107330008F38001C34A20100254F0001AD38001029
65589+:107340008D830000AD0E000431EB7FFFAD03000024
65590+:107350008C47000C34B90900A78B0058AD07000812
65591+:10736000932B00123C04080090843FD8250200149F
65592+:10737000317800FF030D302100064F0000047C002F
65593+:10738000012F702535CDFFFCAD00001003E0000893
65594+:10739000AD0D000C3C02080094423FD23C050800B1
65595+:1073A00094A53FC835440AA43C07080094E73FC4AD
65596+:1073B000948B00000045C8210327C023000B1C004C
65597+:1073C0002706FFF200665025AD2A000CAD20001004
65598+:1073D000AD2C00140A00063025290018354F0AA4E8
65599+:1073E00095E50000956400280005140000043C00A9
65600+:1073F0003459810000EC5825AD39000CAD2B00103C
65601+:107400000A000630252900143C0C0800958C3FCE5C
65602+:107410000A000681258200015460FF56240A0800F4
65603+:1074200035580AA49706000000061C00006C502581
65604+:10743000AD2A000C0A000630252900103C03080084
65605+:1074400094633FD23C07080094E73FC83C0F080014
65606+:1074500095EF3FC494A4000095790028006710219F
65607+:10746000004F582300041C00001934002578FFEE5B
65608+:1074700000D87825346A8100AD2A000CAD2F0010A9
65609+:10748000AD200014AD2C00180A0006302529001C80
65610+:1074900003E00008240207D027BDFFE0AFB20018C8
65611+:1074A000AFB10014AFB00010AFBF001C0E00007CE5
65612+:1074B000008088218F8800548F87004C3C0580080D
65613+:1074C00034B20080011128213C1080002402008089
65614+:1074D000240300C000A72023AE0208183C06800841
65615+:1074E000AE03081C18800004AF850054ACC500042E
65616+:1074F0008CC90004AF89004C1220000936040980B1
65617+:107500000E0006F800000000924C00278E0B00745D
65618+:1075100001825004014B3021AE46000C3604098034
65619+:107520008C8E001C8F8F005C01CF682319A0000493
65620+:107530008FBF001C8C90001CAF90005C8FBF001CA4
65621+:107540008FB200188FB100148FB000100A00007EB7
65622+:1075500027BD00208F8600508F8300548F82004CFF
65623+:107560003C05800834A40080AC860050AC83003C0D
65624+:1075700003E00008ACA200043C0308008C63005444
65625+:1075800027BDFFF8308400FF2462000130A500FF12
65626+:107590003C010800AC22005430C600FF3C078000CC
65627+:1075A0008CE801780500FFFE3C0C7FFFA3A40003DC
65628+:1075B0008FAA0000358BFFFF014B4824000627C02F
65629+:1075C00001244025AFA8000034E201009043000AE6
65630+:1075D000A3A000023C1980FFA3A300018FAF00000D
65631+:1075E00030AE007F3738FFFF01F86024000E6E00D8
65632+:1075F0003C0A002034E50140018D58253549200022
65633+:107600002406FF803C04100027BD0008ACAB000C32
65634+:10761000ACA90014A4A00018A0A6001203E0000862
65635+:10762000ACE40178308800FF30A700FF3C03800005
65636+:107630008C6201780440FFFE3C0C8000358A0A0011
65637+:107640008D4B00203584014035850980AC8B0004CA
65638+:107650008D4900240007302B00061540AC89000836
65639+:10766000A088001090A3004CA083002D03E0000828
65640+:10767000A480001827BDFFE8308400FFAFBF0010D2
65641+:107680000E00075D30A500FF8F8300548FBF0010F0
65642+:107690003C06800034C50140344700402404FF907C
65643+:1076A0003C02100027BD0018ACA3000CA0A40012DF
65644+:1076B000ACA7001403E00008ACC2017827BDFFE0CE
65645+:1076C0003C088008AFBF001CAFB20018AFB1001477
65646+:1076D000AFB00010351000808E0600183C07800007
65647+:1076E000309200FF00C72025AE0400180E00007C79
65648+:1076F00030B100FF92030005346200080E00007EE6
65649+:10770000A2020005024020210E000771022028215C
65650+:10771000024020218FBF001C8FB200188FB10014CF
65651+:107720008FB0001024050005240600010A0007326E
65652+:1077300027BD00203C05800034A309809066000826
65653+:1077400030C200081040000F3C0A01013549080A08
65654+:10775000AC8900008CA80074AC8800043C070800C9
65655+:1077600090E73FD830E5001050A00008AC8000083A
65656+:107770003C0D800835AC00808D8B0058AC8B000828
65657+:107780002484000C03E00008008010210A0007B5E3
65658+:107790002484000C27BDFFE83C098000AFB0001036
65659+:1077A000AFBF00143526098090C8000924020006E6
65660+:1077B00000A05821310300FF3527090000808021F7
65661+:1077C000240500041062007B2408000294CF005CB2
65662+:1077D0003C0E020431EDFFFF01AE6025AE0C00004F
65663+:1077E00090CA00083144002010800008000000000A
65664+:1077F00090C2004E3C1F010337F90300305800FFD0
65665+:107800000319302524050008AE06000490F9001184
65666+:1078100090E6001290E40011333800FF00187082E7
65667+:1078200030CF00FF01CF5021014B6821308900FF8C
65668+:1078300031AAFFFF39230028000A60801460002C61
65669+:10784000020C482390E400123C198000372F0100FD
65670+:10785000308C00FF018B1821000310800045F821B7
65671+:10786000001F8400360706FFAD270004373F0900DC
65672+:1078700093EC001193EE0012372609800005C082B8
65673+:107880008DE4000C8CC5003431CD00FF01AB10211C
65674+:107890000058182100A4F8230008840000033F00CA
65675+:1078A00000F0302533F9FFFF318F00FC00D970253F
65676+:1078B0000158202101E9682100045080ADAE000C80
65677+:1078C0000E00007C012A80213C088008240B000463
65678+:1078D000350500800E00007EA0AB000902001021DB
65679+:1078E0008FBF00148FB0001003E0000827BD001800
65680+:1078F00090EC001190E300193C18080097183FCE57
65681+:10790000318200FF0002F882307000FF001FCE00BD
65682+:1079100000103C000327302500D870253C0F4000A4
65683+:1079200001CF68253C198000AD2D0000373F0900CC
65684+:1079300093EC001193EE0012372F010037260980D7
65685+:107940000005C0828DE4000C8CC5003431CD00FFF1
65686+:1079500001AB10210058182100A4F823000884006E
65687+:1079600000033F0000F0302533F9FFFF318F00FCAA
65688+:1079700000D970250158202101E9682100045080B8
65689+:10798000ADAE000C0E00007C012A80213C0880086E
65690+:10799000240B0004350500800E00007EA0AB00091A
65691+:1079A000020010218FBF00148FB0001003E0000808
65692+:1079B00027BD00180A0007C72408001227BDFFD002
65693+:1079C0003C038000AFB60028AFB50024AFB4002060
65694+:1079D000AFB10014AFBF002CAFB3001CAFB20018A2
65695+:1079E000AFB000103467010090E6000B309400FF48
65696+:1079F00030B500FF30C200300000B02110400099C7
65697+:107A000000008821346409809088000800082E0056
65698+:107A100000051E03046000C0240400048F86005487
65699+:107A20003C010800A0243FD83C0C8000AD800048F9
65700+:107A30003C048000348E010091CD000B31A5002064
65701+:107A400010A000073C078000349309809272000860
65702+:107A50000012860000107E0305E000C43C1F800871
65703+:107A600034EC0100918A000B34EB09809169000825
65704+:107A7000314400400004402B3123000800C8982303
65705+:107A80001460000224120003000090213C108000CA
65706+:107A900036180A8036040900970E002C90830011D6
65707+:107AA0009089001293050018307F00FF312800FFF5
65708+:107AB000024810210002C880930D0018033F78216E
65709+:107AC00001F1302130B100FF00D11821A78E0058FC
65710+:107AD0003C010800A4263FCE3C010800A4233FD06F
65711+:107AE00015A00002000000000000000D920B010B29
65712+:107AF0003065FFFF3C010800A4233FD2316A0040FB
65713+:107B00003C010800A4203FC83C010800A4203FC459
65714+:107B10001140000224A4000A24A4000B3091FFFFAE
65715+:107B20000E0001E7022020219206010B3C0C080008
65716+:107B3000958C3FD2004020210006698231A70001C8
65717+:107B40000E00060101872821004020210260282123
65718+:107B50000E00060C024030210E0007A1004020213B
65719+:107B600016C00069004020219212010B32560040DD
65720+:107B700012C000053C0500FF8C93000034AEFFFFEF
65721+:107B8000026E8024AC9000000E0001FB0220202138
65722+:107B90003C0F080091EF3FD831F10003122000168E
65723+:107BA0003C1380088F8200543C09800835280080EF
65724+:107BB000245F0001AD1F003C3C0580088CB9000427
65725+:107BC00003E02021033FC0231B000002AF9F0054AD
65726+:107BD0008CA400040E0006F8ACA400043C0780004E
65727+:107BE0008CEB00743C04800834830080004B5021EF
65728+:107BF000AC6A000C3C1380083670008002802021A3
65729+:107C000002A02821A200006B0E00075D3C1480003A
65730+:107C10008F920054368C0140AD92000C8F86004844
65731+:107C20003C151000344D000624D60001AF960048E4
65732+:107C30008FBF002CA18600128FB60028AD8D0014D6
65733+:107C40008FB3001CAE9501788FB200188FB5002459
65734+:107C50008FB400208FB100148FB0001003E0000833
65735+:107C600027BD003034640980908F0008000F760033
65736+:107C7000000E6E0305A00033347F090093F8001B4B
65737+:107C8000241900103C010800A0393FD8331300022A
65738+:107C90001260FF678F8600548F8200601446FF6574
65739+:107CA0003C0480000E00007C000000003C048008C2
65740+:107CB0003485008090A8000924060016310300FFD7
65741+:107CC0001066000D0000000090AB00093C070800A2
65742+:107CD00090E73FD824090008316400FF34EA00012E
65743+:107CE0003C010800A02A3FD81089002F240C000A6C
65744+:107CF000108C00282402000C0E00007E0000000002
65745+:107D00000A0008608F8600540E0007B9024028213F
65746+:107D10000A0008AE004020213C0B8008356A008034
65747+:107D20008D4600548CE9000C1120FF3DAF860054B5
65748+:107D3000240700143C010800A0273FD80A00085F70
65749+:107D40003C0C800090910008241200023C010800C5
65750+:107D5000A0323FD8323000201200000B2416000160
65751+:107D60008F8600540A0008602411000837F800804C
65752+:107D70008F020038AFE200048FF90004AF19003C15
65753+:107D80000A00086C3C0780008F8600540A000860D7
65754+:107D900024110004A0A200090E00007E00000000D3
65755+:107DA0000A0008608F860054240200140A00093A71
65756+:107DB000A0A2000927BDFFE8AFB000103C10800072
65757+:107DC000AFBF001436020100904400090E00075DA9
65758+:107DD000240500013C0480089099000E3483008043
65759+:107DE000909F000F906F00269089000A33F800FFE3
65760+:107DF00000196E000018740031EC00FF01AE502530
65761+:107E0000000C5A00014B3825312800FF3603014091
65762+:107E10003445600000E830252402FF813C04100056
65763+:107E2000AC66000C8FBF0014AC650014A062001299
65764+:107E3000AE0401788FB0001003E0000827BD0018E1
65765+:107E400027BDFFE8308400FFAFBF00100E00075DC4
65766+:107E500030A500FF3C05800034A4014034470040B9
65767+:107E60002406FF92AC870014A08600128F83005472
65768+:107E70008FBF00103C02100027BD0018AC83000C1F
65769+:107E800003E00008ACA2017827BDFFD8AFB0001016
65770+:107E9000308400FF30B000FF3C058000AFB100141B
65771+:107EA000AFBF0020AFB3001CAFB20018000410C277
65772+:107EB00034A60100320300023051000114600007B3
65773+:107EC00090D200093C098008353300809268000593
65774+:107ED0003107000810E0000C308A00100240202119
65775+:107EE0000E00078302202821240200018FBF0020FA
65776+:107EF0008FB3001C8FB200188FB100148FB0001028
65777+:107F000003E0000827BD00281540003434A50A000E
65778+:107F10008CB800248CAF0008130F004B00003821F0
65779+:107F20003C0D800835B30080926C00682406000286
65780+:107F3000318B00FF116600843C06800034C20100D2
65781+:107F40009263004C90590009307F00FF53F9000400
65782+:107F50003213007C10E00069000000003213007C46
65783+:107F60005660005C0240202116200009320D0001FD
65784+:107F70003C0C800035840100358B0A008D6500249F
65785+:107F80008C86000414A6FFD900001021320D0001D8
65786+:107F900011A0000E024020213C1880003710010083
65787+:107FA0008E0F000C8F8E005011EE000800000000B4
65788+:107FB0000E000843022028218E19000C3C1F800867
65789+:107FC00037F00080AE190050024020210E000771EA
65790+:107FD000022028210A00098F240200013C05080024
65791+:107FE0008CA5006424A400013C010800AC240064BA
65792+:107FF0001600000D00000000022028210E0007716D
65793+:1080000002402021926E0068240C000231CD00FF56
65794+:1080100011AC0022024020210E00094100000000A6
65795+:108020000A00098F240200010E00007024040001E0
65796+:10803000926B0025020B30250E00007EA266002503
65797+:108040000A0009D3022028218E6200188CDF000468
65798+:108050008CB9002400021E0217F9FFB13065007FC1
65799+:108060009268004C264400013093007F1265004066
65800+:10807000310300FF1464FFAB3C0D8008264700016C
65801+:1080800030F1007F30E200FF1225000B24070001D1
65802+:10809000004090210A00099C2411000124050004DD
65803+:1080A0000E000732240600010E0009410000000006
65804+:1080B0000A00098F240200012405FF8002452024C4
65805+:1080C00000859026324200FF004090210A00099C62
65806+:1080D000241100010E00084302202821320700303D
65807+:1080E00010E0FFA132100082024020210E00078321
65808+:1080F000022028210A00098F240200018E6900183D
65809+:108100000240202102202821012640250E0009647A
65810+:10811000AE6800189264004C240500032406000198
65811+:108120000E000732308400FF0E00007024040001AE
65812+:1081300092710025021150250E00007EA26A0025D2
65813+:108140000A00098F240200018E6F00183C1880007D
65814+:108150000240202101F87025022028210E0007711D
65815+:10816000AE6E00189264004C0A000A1B240500043D
65816+:10817000324A0080394900801469FF6A3C0D80084A
65817+:108180000A0009F42647000127BDFFC0AFB0001860
65818+:108190003C108000AFBF0038AFB70034AFB600303E
65819+:1081A000AFB5002CAFB40028AFB30024AFB20020AD
65820+:1081B0000E0005BEAFB1001C360201009045000B59
65821+:1081C0000E00097690440008144000E78FBF003885
65822+:1081D0003C08800835070080A0E0006B3606098067
65823+:1081E00090C50000240300503C17080026F73F907C
65824+:1081F00030A400FF3C13080026733FA01083000347
65825+:108200003C1080000000B82100009821241F0010BD
65826+:108210003611010036120A00361509808E580024E6
65827+:108220008E3400048EAF00208F8C00543C01080077
65828+:10823000A03F3FD836190A80972B002C8EF60000FD
65829+:10824000932A00180298702301EC68233C0108006F
65830+:10825000AC2E3FB43C010800AC2D3FB83C010800F7
65831+:10826000AC2C3FDCA78B005802C0F809315400FF4A
65832+:1082700030490002152000E930420001504000C49E
65833+:108280009227000992A90008312800081500000271
65834+:10829000241500030000A8213C0A80003543090092
65835+:1082A00035440A008C8D00249072001190700012E9
65836+:1082B000907F0011325900FF321100FF02B11021EE
65837+:1082C0000002C08033EF00FF0319B021028F70213C
65838+:1082D00002D4602125CB00103C010800A4363FCE1B
65839+:1082E0003C010800AC2D3FE03C010800A42C3FD02D
65840+:1082F0003C010800A42B3FCC3556010035540980C1
65841+:1083000035510E008F8700548F89005C8E850020C8
65842+:1083100024080006012730233C010800AC283FD484
65843+:1083200000A7282304C000B50000902104A000B3DA
65844+:1083300000C5502B114000B5000000003C010800B2
65845+:10834000AC263FB88E6200000040F8090000000033
65846+:108350003046000214C0007400408021304B000100
65847+:10836000556000118E6200043C0D08008DAD3FBCCD
65848+:108370003C0EC0003C04800001AE6025AE2C000025
65849+:108380008C980000330F000811E0FFFD0000000092
65850+:10839000963F000824120001A79F00408E39000478
65851+:1083A000AF9900388E6200040040F8090000000018
65852+:1083B0000202802532030002146000B300000000B6
65853+:1083C0003C09080095293FC43C06080094C63FD0EC
65854+:1083D0003C0A0800954A3FC63C0708008CE73FBCB2
65855+:1083E000012670213C0308008C633FE03C08080034
65856+:1083F00095083FDA01CA20218ED9000C00E9282116
65857+:10840000249F000200A878210067C02133E4FFFF09
65858+:10841000AF9900503C010800AC383FE03C01080037
65859+:10842000A42F3FC83C010800A42E3FD20E0001E754
65860+:10843000000000008F8D0048004020213C01080012
65861+:10844000A02D3FD98E62000825AC0001AF8C0048FA
65862+:108450000040F809000000008F85005402A0302180
65863+:108460000E00060C004020210E0007A10040202134
65864+:108470008E6B000C0160F809004020213C0A0800C6
65865+:10848000954A3FD23C06080094C63FC601464821A3
65866+:10849000252800020E0001FB3104FFFF3C05080007
65867+:1084A0008CA53FB43C0708008CE73FBC00A7202305
65868+:1084B0003C010800AC243FB414800006000000001A
65869+:1084C0003C0208008C423FD4344B00403C01080081
65870+:1084D000AC2B3FD4124000438F8E00448E2D0010F1
65871+:1084E0008F920044AE4D00208E2C0018AE4C00241C
65872+:1084F0003C04080094843FC80E0006FA0000000007
65873+:108500008F9F00548E6700103C010800AC3F3FDC99
65874+:1085100000E0F809000000003C1908008F393FB462
65875+:108520001720FF798F870054979300583C11800ED5
65876+:10853000321601000E000729A633002C16C0004594
65877+:10854000320300105460004C8EE5000432080040F5
65878+:108550005500001D8EF000088EE4000C0080F80924
65879+:10856000000000008FBF00388FB700348FB6003096
65880+:108570008FB5002C8FB400288FB300248FB2002059
65881+:108580008FB1001C8FB0001803E0000827BD004029
65882+:108590008F86003C36110E0000072E0000A6202515
65883+:1085A000AE0400808E4300208E500024AFA3001044
65884+:1085B000AE2300148FB20010AE320010AE30001C9B
65885+:1085C0000A000A75AE3000180200F8090000000029
65886+:1085D0008EE4000C0080F809000000000A000B2E59
65887+:1085E0008FBF003824180001240F0001A5C000200F
65888+:1085F000A5D800220A000B10ADCF00243C010800D2
65889+:10860000AC203FB80A000AA68E6200003C010800B8
65890+:10861000AC253FB80A000AA68E6200009224000929
65891+:108620000E000771000028218FBF00388FB700347B
65892+:108630008FB600308FB5002C8FB400288FB3002484
65893+:108640008FB200208FB1001C8FB0001803E000082B
65894+:1086500027BD00403C1480009295010900002821AC
65895+:108660000E00084332A400FF320300105060FFB830
65896+:10867000320800408EE5000400A0F8090000000068
65897+:108680000A000B28320800405240FFA89793005878
65898+:108690008E3400148F930044AE7400208E35001C7D
65899+:1086A000AE7500240A000B1F979300588F820014A8
65900+:1086B0000004218003E00008008210213C078008AC
65901+:1086C00034E200809043006900804021106000097E
65902+:1086D0003C0401003C0708008CE73FDC8F8300303E
65903+:1086E00000E32023048000089389001C14E30003A6
65904+:1086F0000100202103E00008008010213C0401005B
65905+:1087000003E00008008010211120000B00673823CF
65906+:108710003C0D800035AC0980918B007C316A0002F1
65907+:10872000114000202409003400E9702B15C0FFF12E
65908+:108730000100202100E938232403FFFC00A3C82402
65909+:1087400000E3C02400F9782B15E0FFEA030820219C
65910+:1087500030C400030004102314C000143049000387
65911+:108760000000302100A9782101E6702100EE682B7D
65912+:1087700011A0FFE03C0401002D3800010006C82BC9
65913+:10878000010548210319382414E0FFDA2524FFFCF1
65914+:108790002402FFFC00A218240068202103E0000846
65915+:1087A000008010210A000B9E240900303C0C800040
65916+:1087B0003586098090CB007C316A00041540FFE9C2
65917+:1087C000240600040A000BAD000030213C03080021
65918+:1087D0008C63005C8F82001827BDFFE0AFBF0018DC
65919+:1087E000AFB1001410620005AFB00010000329C043
65920+:1087F00024A40280AF840014AF8300183C108000D2
65921+:1088000036020A0094450032361101000E000B7F3B
65922+:1088100030A43FFF8E240000241FFF803C11008005
65923+:108820000082C021031F60243309007F000CC9406F
65924+:1088300003294025330E0078362F00033C0D10002D
65925+:10884000010D502501CF5825AE0C002836080980AF
65926+:10885000AE0C080CAE0B082CAE0A08309103006970
65927+:108860003C06800C0126382110600006AF870034DA
65928+:108870008D09003C8D03006C0123382318E0008231
65929+:10888000000000003C0B8008356A00803C1080002E
65930+:10889000A1400069360609808CC200383C06800081
65931+:1088A00034C50A0090A8003C310C00201180001A49
65932+:1088B000AF820030240D00013C0E800035D10A004B
65933+:1088C000A38D001CAF8000248E2400248F850024FB
65934+:1088D000240D0008AF800020AF8000283C01080074
65935+:1088E000A42D3FC63C010800A4203FDA0E000B83F4
65936+:1088F000000030219228003C8FBF00188FB1001477
65937+:108900008FB0001000086142AF82002C27BD00200C
65938+:1089100003E000083182000190B80032240E00010B
65939+:10892000330F00FF000F2182108E00412419000236
65940+:108930001099006434C40AC03C03800034640A0007
65941+:108940008C8F002415E0001E34660900909F0030D3
65942+:108950002418000533F9003F1338004E24030001AA
65943+:108960008F860020A383001CAF860028AF860024DA
65944+:108970003C0E800035D10A008E2400248F8500240F
65945+:10898000240D00083C010800A42D3FC63C0108004E
65946+:10899000A4203FDA0E000B83000000009228003C68
65947+:1089A0008FBF00188FB100148FB000100008614213
65948+:1089B000AF82002C27BD002003E0000831820001B7
65949+:1089C0008C8A00088C8B00248CD000643C0E8000C4
65950+:1089D00035D10A00014B2823AF900024A380001C4E
65951+:1089E000AF8500288E2400248F8600208F850024E8
65952+:1089F000240D00083C010800A42D3FC63C010800DE
65953+:108A0000A4203FDA0E000B83000000009228003CF7
65954+:108A10008FBF00188FB100148FB0001000086142A2
65955+:108A2000AF82002C27BD002003E000083182000146
65956+:108A300090A200303051003F5224002834C50AC0B3
65957+:108A40008CB000241600002234CB09008CA600480C
65958+:108A50003C0A7FFF3545FFFF00C510243C0E800017
65959+:108A6000AF82002035C509008F8800208CAD0060E2
65960+:108A7000010D602B15800002010020218CA40060F4
65961+:108A80000A000C22AF8400208D02006C0A000BFC4F
65962+:108A90003C0680008C8200488F8600203C097FFFC6
65963+:108AA0003527FFFF004788243C0480082403000189
65964+:108AB000AF910028AC80006CA383001C0A000C302E
65965+:108AC000AF8600248C9F00140A000C22AF9F002068
65966+:108AD0008D6200680A000C6C3C0E800034C4098072
65967+:108AE0008C8900708CA300140123382B10E0000443
65968+:108AF000000000008C8200700A000C6C3C0E8000AC
65969+:108B00008CA200140A000C6C3C0E80008F8500249F
65970+:108B100027BDFFE0AFBF0018AFB1001414A00008DC
65971+:108B2000AFB000103C04800034870A0090E60030AB
65972+:108B30002402000530C3003F106200B934840900EC
65973+:108B40008F91002000A080213C048000348E0A0018
65974+:108B50008DCD00043C0608008CC63FB831A73FFF0E
65975+:108B600000E6602B5580000100E03021938F001C4F
65976+:108B700011E0007800D0282B349F098093F9007C05
65977+:108B800033380002130000792403003400C3102B93
65978+:108B9000144000D90000000000C3302300D0282B6F
65979+:108BA0003C010800A4233FC414A0006E0200182159
65980+:108BB0003C0408008C843FB40064402B5500000145
65981+:108BC000006020213C05800034A90A00912A003C65
65982+:108BD0003C010800AC243FBC31430020146000037A
65983+:108BE0000000482134AB0E008D6900188F88002CDE
65984+:108BF0000128202B1080005F000000003C050800C9
65985+:108C00008CA53FBC00A96821010D602B1180005C80
65986+:108C100000B0702B0109382300E028213C01080036
65987+:108C2000AC273FBC12000003240AFFFC10B0008DEB
65988+:108C30003224000300AA18243C010800A4203FDAD3
65989+:108C40003C010800AC233FBC006028218F84002435
65990+:108C5000120400063C0B80088D6C006C0200202181
65991+:108C6000AF91002025900001AD70006C8F8D002821
65992+:108C700000858823AF91002401A52023AF8400281C
65993+:108C80001220000224070018240700103C18800856
65994+:108C90003706008090CF00683C010800A0273FD82D
65995+:108CA0002407000131EE00FF11C70047000000005B
65996+:108CB00014800018000028213C06800034D109806F
65997+:108CC00034CD010091A600098E2C001824C40001A7
65998+:108CD000000C86023205007F308B007F1165007F1B
65999+:108CE0002407FF803C19800837290080A124004C0C
66000+:108CF0003C0808008D083FD4241800023C010800FD
66001+:108D0000A0384019350F00083C010800AC2F3FD4B3
66002+:108D1000240500103C02800034440A009083003C8B
66003+:108D2000307F002013E0000500A02021240A00016C
66004+:108D30003C010800AC2A3FBC34A400018FBF0018DE
66005+:108D40008FB100148FB000100080102103E00008E4
66006+:108D500027BD00203C010800A4203FC410A0FF94C0
66007+:108D6000020018210A000CC000C018210A000CB72C
66008+:108D7000240300303C0508008CA53FBC00B0702BDC
66009+:108D800011C0FFA8000000003C19080097393FC43B
66010+:108D90000325C0210307782B11E000072CAA00044B
66011+:108DA0003C0360008C625404305F003F17E0FFE337
66012+:108DB000240400422CAA00041140FF9A240400421B
66013+:108DC0000A000D248FBF00181528FFB9000000000D
66014+:108DD0008CCA00183C1F800024020002015F182585
66015+:108DE000ACC3001837F90A00A0C200689329003C00
66016+:108DF0002404000400A01021312800203C010800B8
66017+:108E0000A0244019110000022405001024020001D2
66018+:108E10003C010800AC223FB40A000D1A3C0280005D
66019+:108E20008F8800288C8900600109282B14A000027B
66020+:108E3000010088218C9100603C048000348B0E007E
66021+:108E40008D640018240A000102202821022030210C
66022+:108E5000A38A001C0E000B83022080210A000CA6AE
66023+:108E6000AF82002C00045823122000073164000355
66024+:108E70003C0E800035C7098090ED007C31AC0004C9
66025+:108E800015800019248F00043C010800A4243FDA57
66026+:108E90003C1F080097FF3FDA03E5C82100D9C02B2B
66027+:108EA0001300FF6B8F8400242CA6000514C0FFA3C1
66028+:108EB0002404004230A200031440000200A2182340
66029+:108EC00024A3FFFC3C010800AC233FBC3C0108008C
66030+:108ED000A4203FDA0A000CE70060282100C77024B4
66031+:108EE0000A000D0D01C720263C010800A42F3FDA1F
66032+:108EF0000A000D78000000003C010800AC203FBCD7
66033+:108F00000A000D23240400428F8300283C058000C2
66034+:108F100034AA0A00146000060000102191470030B6
66035+:108F20002406000530E400FF108600030000000066
66036+:108F300003E0000800000000914B0048316900FF89
66037+:108F4000000941C21500FFFA3C0680083C040800F5
66038+:108F500094843FC43C0308008C633FDC3C19080048
66039+:108F60008F393FBC3C0F080095EF3FDA0064C02109
66040+:108F70008CCD00040319702101CF602134AB0E00A9
66041+:108F8000018D282318A0001D00000000914F004C07
66042+:108F90008F8C0034956D001031EE00FF8D89000438
66043+:108FA00001AE30238D8A000030CEFFFF000E290075
66044+:108FB0000125C82100003821014720210325182B55
66045+:108FC0000083C021AD990004AD980000918F000A84
66046+:108FD00001CF6821A18D000A956500128F8A0034A7
66047+:108FE000A5450008954B003825690001A5490038C2
66048+:108FF0009148000D35070008A147000D03E0000867
66049+:109000000000000027BDFFD8AFB000189388001CF7
66050+:109010008FB000143C0A80003C197FFF8F8700242A
66051+:109020003738FFFFAFBF0020AFB1001C355F0A002B
66052+:109030000218182493EB003C00087FC03C02BFFFDD
66053+:10904000006F60252CF000013449FFFF3C1F080031
66054+:109050008FFF3FDC8F9900303C18080097183FD2F3
66055+:1090600001897824001047803C07EFFF3C05F0FFA2
66056+:1090700001E818253C1180003169002034E2FFFF2F
66057+:1090800034ADFFFF362E098027A50010240600020C
66058+:1090900003F96023270B0002354A0E0000621824F2
66059+:1090A0000080802115200002000040218D48001C16
66060+:1090B000A7AB0012058000392407000030E800FF4C
66061+:1090C00000083F00006758253C028008AFAB001441
66062+:1090D000344F008091EA00683C08080091083FD9AD
66063+:1090E0003C09DFFF352CFFFF000AF82B3C0208008B
66064+:1090F00094423FCCA3A80011016CC024001FCF40B4
66065+:10910000031918258FA70010AFA300143C0C08000A
66066+:10911000918C3FDBA7A200168FAB001400ED482412
66067+:109120003C0F01003C0A0FFF012FC82531980003B6
66068+:10913000355FFFFF016D40243C027000033F38247F
66069+:1091400000181E0000E2482501037825AFAF001487
66070+:10915000AFA9001091CC007C0E000092A3AC0015CA
66071+:10916000362D0A0091A6003C30C400201080000675
66072+:10917000260200083C11080096313FC8262EFFFF4A
66073+:109180003C010800A42E3FC88FBF00208FB1001CF7
66074+:109190008FB0001803E0000827BD00288F8B002C3B
66075+:1091A000010B502B5540FFC5240700010A000E0497
66076+:1091B00030E800FF9383001C3C02800027BDFFD8ED
66077+:1091C00034480A0000805021AFBF002034460AC056
66078+:1091D000010028211060000E3444098091070030FE
66079+:1091E000240B00058F89002030EC003F118B000B11
66080+:1091F00000003821AFA900103C0B80088D69006C7D
66081+:10920000AFAA00180E00015AAFA90014A380001CD9
66082+:109210008FBF002003E0000827BD00288D1F0048F5
66083+:109220003C1808008F183FBC8F9900283C027FFF34
66084+:109230008D0800443443FFFFAFA900103C0B8008A9
66085+:109240008D69006C03E370240319782101CF682332
66086+:1092500001A83821AFAA00180E00015AAFA90014C6
66087+:109260000A000E58A380001C3C05800034A60A00AA
66088+:1092700090C7003C3C06080094C63FDA3C02080058
66089+:109280008C423FD430E30020000624001060001E12
66090+:10929000004438253C0880083505008090A300680C
66091+:1092A00000004821240800010000282124040001B6
66092+:1092B0003C0680008CCD017805A0FFFE34CF014034
66093+:1092C000ADE800083C0208008C423FDCA5E5000444
66094+:1092D000A5E40006ADE2000C3C04080090843FD9F0
66095+:1092E0003C03800834790080A1E40012ADE700144B
66096+:1092F000A5E900189338004C3C0E1000A1F8002D91
66097+:1093000003E00008ACCE017834A90E008D28001CC3
66098+:109310003C0C08008D8C3FBC952B0016952A001440
66099+:10932000018648213164FFFF0A000E803145FFFFAE
66100+:109330003C04800034830A009065003C30A2002089
66101+:109340001040001934870E00000040210000382131
66102+:10935000000020213C0680008CC901780520FFFE1A
66103+:1093600034CA014034CF010091EB0009AD48000838
66104+:109370003C0E08008DCE3FDC240DFF91240C0040F4
66105+:109380003C081000A5440004A5470006AD4E000CA3
66106+:10939000A14D0012AD4C0014A5400018A14B002DAA
66107+:1093A00003E00008ACC801788CE8001894E60012CD
66108+:1093B00094E4001030C7FFFF0A000EA93084FFFFBD
66109+:1093C0003C04800034830A009065003C30A20020F9
66110+:1093D0001040002727BDFFF82409000100003821B4
66111+:1093E000240800013C0680008CCA01780540FFFE7D
66112+:1093F0003C0280FF34C40100908D00093C0C080041
66113+:10940000918C4019A3AD00038FAB00003185007F24
66114+:109410003459FFFF01665025AFAA00009083000A6F
66115+:10942000A3A0000200057E00A3A300018FB80000E6
66116+:1094300034CB0140240C30000319702401CF68257F
66117+:10944000AD6D000C27BD0008AD6C0014A5600018C0
66118+:10945000AD690008A56700042409FF80A56800061F
66119+:109460003C081000A169001203E00008ACC80178B4
66120+:1094700034870E008CE9001894E6001294E4001082
66121+:1094800030C8FFFF0A000ECD3087FFFF27BDFFE089
66122+:10949000AFB100143C118000AFB00010AFBF001896
66123+:1094A00036380A00970F0032363001000E000B7F6D
66124+:1094B00031E43FFF8E0E0000240DFF803C042000AD
66125+:1094C00001C25821016D6024000C4940316A007FBF
66126+:1094D000012A4025010438253C048008AE270830C5
66127+:1094E0003486008090C500682403000230A200FF8B
66128+:1094F000104300048F9F00208F990024AC9F0068C8
66129+:10950000AC9900648FBF00188FB100148FB00010A9
66130+:1095100003E0000827BD00203C0A0800254A3A80E5
66131+:109520003C09080025293B103C08080025082F1C91
66132+:109530003C07080024E73BDC3C06080024C639044D
66133+:109540003C05080024A536583C0408002484325CFD
66134+:109550003C030800246339B83C0208002442375415
66135+:109560003C010800AC2A3F983C010800AC293F941C
66136+:109570003C010800AC283F903C010800AC273F9C10
66137+:109580003C010800AC263FAC3C010800AC253FA4E0
66138+:109590003C010800AC243FA03C010800AC233FB0D4
66139+:1095A0003C010800AC223FA803E0000800000000D6
66140+:1095B00080000940800009008008010080080080C8
66141+:1095C00080080000800E00008008008080080000F5
66142+:1095D00080000A8080000A00800009808000090065
66143+:00000001FF
66144diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
66145index eb14e05..5156de7 100644
66146--- a/fs/9p/vfs_addr.c
66147+++ b/fs/9p/vfs_addr.c
66148@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
66149
66150 retval = v9fs_file_write_internal(inode,
66151 v9inode->writeback_fid,
66152- (__force const char __user *)buffer,
66153+ (const char __force_user *)buffer,
66154 len, &offset, 0);
66155 if (retval > 0)
66156 retval = 0;
66157diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
66158index 3662f1d..90558b5 100644
66159--- a/fs/9p/vfs_inode.c
66160+++ b/fs/9p/vfs_inode.c
66161@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
66162 void
66163 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
66164 {
66165- char *s = nd_get_link(nd);
66166+ const char *s = nd_get_link(nd);
66167
66168 p9_debug(P9_DEBUG_VFS, " %pd %s\n",
66169 dentry, IS_ERR(s) ? "<error>" : s);
66170diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
66171index 270c481..0d8a962 100644
66172--- a/fs/Kconfig.binfmt
66173+++ b/fs/Kconfig.binfmt
66174@@ -106,7 +106,7 @@ config HAVE_AOUT
66175
66176 config BINFMT_AOUT
66177 tristate "Kernel support for a.out and ECOFF binaries"
66178- depends on HAVE_AOUT
66179+ depends on HAVE_AOUT && BROKEN
66180 ---help---
66181 A.out (Assembler.OUTput) is a set of formats for libraries and
66182 executables used in the earliest versions of UNIX. Linux used
66183diff --git a/fs/afs/inode.c b/fs/afs/inode.c
66184index 8a1d38e..300a14e 100644
66185--- a/fs/afs/inode.c
66186+++ b/fs/afs/inode.c
66187@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
66188 struct afs_vnode *vnode;
66189 struct super_block *sb;
66190 struct inode *inode;
66191- static atomic_t afs_autocell_ino;
66192+ static atomic_unchecked_t afs_autocell_ino;
66193
66194 _enter("{%x:%u},%*.*s,",
66195 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
66196@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
66197 data.fid.unique = 0;
66198 data.fid.vnode = 0;
66199
66200- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
66201+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
66202 afs_iget5_autocell_test, afs_iget5_set,
66203 &data);
66204 if (!inode) {
66205diff --git a/fs/aio.c b/fs/aio.c
66206index a1736e9..c80a8ac 100644
66207--- a/fs/aio.c
66208+++ b/fs/aio.c
66209@@ -409,7 +409,7 @@ static int aio_setup_ring(struct kioctx *ctx)
66210 size += sizeof(struct io_event) * nr_events;
66211
66212 nr_pages = PFN_UP(size);
66213- if (nr_pages < 0)
66214+ if (nr_pages <= 0)
66215 return -EINVAL;
66216
66217 file = aio_private_file(ctx, nr_pages);
66218diff --git a/fs/attr.c b/fs/attr.c
66219index 6530ced..4a827e2 100644
66220--- a/fs/attr.c
66221+++ b/fs/attr.c
66222@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
66223 unsigned long limit;
66224
66225 limit = rlimit(RLIMIT_FSIZE);
66226+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
66227 if (limit != RLIM_INFINITY && offset > limit)
66228 goto out_sig;
66229 if (offset > inode->i_sb->s_maxbytes)
66230diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
66231index 116fd38..c04182da 100644
66232--- a/fs/autofs4/waitq.c
66233+++ b/fs/autofs4/waitq.c
66234@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
66235 {
66236 unsigned long sigpipe, flags;
66237 mm_segment_t fs;
66238- const char *data = (const char *)addr;
66239+ const char __user *data = (const char __force_user *)addr;
66240 ssize_t wr = 0;
66241
66242 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
66243@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
66244 return 1;
66245 }
66246
66247+#ifdef CONFIG_GRKERNSEC_HIDESYM
66248+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
66249+#endif
66250+
66251 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
66252 enum autofs_notify notify)
66253 {
66254@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
66255
66256 /* If this is a direct mount request create a dummy name */
66257 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
66258+#ifdef CONFIG_GRKERNSEC_HIDESYM
66259+ /* this name does get written to userland via autofs4_write() */
66260+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
66261+#else
66262 qstr.len = sprintf(name, "%p", dentry);
66263+#endif
66264 else {
66265 qstr.len = autofs4_getpath(sbi, dentry, &name);
66266 if (!qstr.len) {
66267diff --git a/fs/befs/endian.h b/fs/befs/endian.h
66268index 2722387..56059b5 100644
66269--- a/fs/befs/endian.h
66270+++ b/fs/befs/endian.h
66271@@ -11,7 +11,7 @@
66272
66273 #include <asm/byteorder.h>
66274
66275-static inline u64
66276+static inline u64 __intentional_overflow(-1)
66277 fs64_to_cpu(const struct super_block *sb, fs64 n)
66278 {
66279 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
66280@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
66281 return (__force fs64)cpu_to_be64(n);
66282 }
66283
66284-static inline u32
66285+static inline u32 __intentional_overflow(-1)
66286 fs32_to_cpu(const struct super_block *sb, fs32 n)
66287 {
66288 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
66289@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
66290 return (__force fs32)cpu_to_be32(n);
66291 }
66292
66293-static inline u16
66294+static inline u16 __intentional_overflow(-1)
66295 fs16_to_cpu(const struct super_block *sb, fs16 n)
66296 {
66297 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
66298diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
66299index 4c55668..eeae150 100644
66300--- a/fs/binfmt_aout.c
66301+++ b/fs/binfmt_aout.c
66302@@ -16,6 +16,7 @@
66303 #include <linux/string.h>
66304 #include <linux/fs.h>
66305 #include <linux/file.h>
66306+#include <linux/security.h>
66307 #include <linux/stat.h>
66308 #include <linux/fcntl.h>
66309 #include <linux/ptrace.h>
66310@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
66311 #endif
66312 # define START_STACK(u) ((void __user *)u.start_stack)
66313
66314+ memset(&dump, 0, sizeof(dump));
66315+
66316 fs = get_fs();
66317 set_fs(KERNEL_DS);
66318 has_dumped = 1;
66319@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
66320
66321 /* If the size of the dump file exceeds the rlimit, then see what would happen
66322 if we wrote the stack, but not the data area. */
66323+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
66324 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
66325 dump.u_dsize = 0;
66326
66327 /* Make sure we have enough room to write the stack and data areas. */
66328+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
66329 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
66330 dump.u_ssize = 0;
66331
66332@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
66333 rlim = rlimit(RLIMIT_DATA);
66334 if (rlim >= RLIM_INFINITY)
66335 rlim = ~0;
66336+
66337+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
66338 if (ex.a_data + ex.a_bss > rlim)
66339 return -ENOMEM;
66340
66341@@ -261,6 +268,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
66342
66343 install_exec_creds(bprm);
66344
66345+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66346+ current->mm->pax_flags = 0UL;
66347+#endif
66348+
66349+#ifdef CONFIG_PAX_PAGEEXEC
66350+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
66351+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
66352+
66353+#ifdef CONFIG_PAX_EMUTRAMP
66354+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
66355+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
66356+#endif
66357+
66358+#ifdef CONFIG_PAX_MPROTECT
66359+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
66360+ current->mm->pax_flags |= MF_PAX_MPROTECT;
66361+#endif
66362+
66363+ }
66364+#endif
66365+
66366 if (N_MAGIC(ex) == OMAGIC) {
66367 unsigned long text_addr, map_size;
66368 loff_t pos;
66369@@ -312,7 +340,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
66370 return error;
66371
66372 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
66373- PROT_READ | PROT_WRITE | PROT_EXEC,
66374+ PROT_READ | PROT_WRITE,
66375 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
66376 fd_offset + ex.a_text);
66377 if (error != N_DATADDR(ex))
66378diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
66379index 8081aba..90a7bdd 100644
66380--- a/fs/binfmt_elf.c
66381+++ b/fs/binfmt_elf.c
66382@@ -34,6 +34,7 @@
66383 #include <linux/utsname.h>
66384 #include <linux/coredump.h>
66385 #include <linux/sched.h>
66386+#include <linux/xattr.h>
66387 #include <asm/uaccess.h>
66388 #include <asm/param.h>
66389 #include <asm/page.h>
66390@@ -47,7 +48,7 @@
66391
66392 static int load_elf_binary(struct linux_binprm *bprm);
66393 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
66394- int, int, unsigned long);
66395+ int, int, unsigned long) __intentional_overflow(-1);
66396
66397 #ifdef CONFIG_USELIB
66398 static int load_elf_library(struct file *);
66399@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
66400 #define elf_core_dump NULL
66401 #endif
66402
66403+#ifdef CONFIG_PAX_MPROTECT
66404+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
66405+#endif
66406+
66407+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
66408+static void elf_handle_mmap(struct file *file);
66409+#endif
66410+
66411 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
66412 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
66413 #else
66414@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
66415 .load_binary = load_elf_binary,
66416 .load_shlib = load_elf_library,
66417 .core_dump = elf_core_dump,
66418+
66419+#ifdef CONFIG_PAX_MPROTECT
66420+ .handle_mprotect= elf_handle_mprotect,
66421+#endif
66422+
66423+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
66424+ .handle_mmap = elf_handle_mmap,
66425+#endif
66426+
66427 .min_coredump = ELF_EXEC_PAGESIZE,
66428 };
66429
66430@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
66431
66432 static int set_brk(unsigned long start, unsigned long end)
66433 {
66434+ unsigned long e = end;
66435+
66436 start = ELF_PAGEALIGN(start);
66437 end = ELF_PAGEALIGN(end);
66438 if (end > start) {
66439@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
66440 if (BAD_ADDR(addr))
66441 return addr;
66442 }
66443- current->mm->start_brk = current->mm->brk = end;
66444+ current->mm->start_brk = current->mm->brk = e;
66445 return 0;
66446 }
66447
66448@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
66449 elf_addr_t __user *u_rand_bytes;
66450 const char *k_platform = ELF_PLATFORM;
66451 const char *k_base_platform = ELF_BASE_PLATFORM;
66452- unsigned char k_rand_bytes[16];
66453+ u32 k_rand_bytes[4];
66454 int items;
66455 elf_addr_t *elf_info;
66456 int ei_index = 0;
66457 const struct cred *cred = current_cred();
66458 struct vm_area_struct *vma;
66459+ unsigned long saved_auxv[AT_VECTOR_SIZE];
66460
66461 /*
66462 * In some cases (e.g. Hyper-Threading), we want to avoid L1
66463@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
66464 * Generate 16 random bytes for userspace PRNG seeding.
66465 */
66466 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
66467- u_rand_bytes = (elf_addr_t __user *)
66468- STACK_ALLOC(p, sizeof(k_rand_bytes));
66469+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
66470+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
66471+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
66472+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
66473+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
66474+ u_rand_bytes = (elf_addr_t __user *) p;
66475 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
66476 return -EFAULT;
66477
66478@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
66479 return -EFAULT;
66480 current->mm->env_end = p;
66481
66482+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
66483+
66484 /* Put the elf_info on the stack in the right place. */
66485 sp = (elf_addr_t __user *)envp + 1;
66486- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
66487+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
66488 return -EFAULT;
66489 return 0;
66490 }
66491@@ -514,14 +541,14 @@ static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
66492 an ELF header */
66493
66494 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
66495- struct file *interpreter, unsigned long *interp_map_addr,
66496+ struct file *interpreter,
66497 unsigned long no_base, struct elf_phdr *interp_elf_phdata)
66498 {
66499 struct elf_phdr *eppnt;
66500- unsigned long load_addr = 0;
66501+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
66502 int load_addr_set = 0;
66503 unsigned long last_bss = 0, elf_bss = 0;
66504- unsigned long error = ~0UL;
66505+ unsigned long error = -EINVAL;
66506 unsigned long total_size;
66507 int i;
66508
66509@@ -541,6 +568,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
66510 goto out;
66511 }
66512
66513+#ifdef CONFIG_PAX_SEGMEXEC
66514+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
66515+ pax_task_size = SEGMEXEC_TASK_SIZE;
66516+#endif
66517+
66518 eppnt = interp_elf_phdata;
66519 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
66520 if (eppnt->p_type == PT_LOAD) {
66521@@ -564,8 +596,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
66522 map_addr = elf_map(interpreter, load_addr + vaddr,
66523 eppnt, elf_prot, elf_type, total_size);
66524 total_size = 0;
66525- if (!*interp_map_addr)
66526- *interp_map_addr = map_addr;
66527 error = map_addr;
66528 if (BAD_ADDR(map_addr))
66529 goto out;
66530@@ -584,8 +614,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
66531 k = load_addr + eppnt->p_vaddr;
66532 if (BAD_ADDR(k) ||
66533 eppnt->p_filesz > eppnt->p_memsz ||
66534- eppnt->p_memsz > TASK_SIZE ||
66535- TASK_SIZE - eppnt->p_memsz < k) {
66536+ eppnt->p_memsz > pax_task_size ||
66537+ pax_task_size - eppnt->p_memsz < k) {
66538 error = -ENOMEM;
66539 goto out;
66540 }
66541@@ -624,9 +654,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
66542 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
66543
66544 /* Map the last of the bss segment */
66545- error = vm_brk(elf_bss, last_bss - elf_bss);
66546- if (BAD_ADDR(error))
66547- goto out;
66548+ if (last_bss > elf_bss) {
66549+ error = vm_brk(elf_bss, last_bss - elf_bss);
66550+ if (BAD_ADDR(error))
66551+ goto out;
66552+ }
66553 }
66554
66555 error = load_addr;
66556@@ -634,6 +666,336 @@ out:
66557 return error;
66558 }
66559
66560+#ifdef CONFIG_PAX_PT_PAX_FLAGS
66561+#ifdef CONFIG_PAX_SOFTMODE
66562+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
66563+{
66564+ unsigned long pax_flags = 0UL;
66565+
66566+#ifdef CONFIG_PAX_PAGEEXEC
66567+ if (elf_phdata->p_flags & PF_PAGEEXEC)
66568+ pax_flags |= MF_PAX_PAGEEXEC;
66569+#endif
66570+
66571+#ifdef CONFIG_PAX_SEGMEXEC
66572+ if (elf_phdata->p_flags & PF_SEGMEXEC)
66573+ pax_flags |= MF_PAX_SEGMEXEC;
66574+#endif
66575+
66576+#ifdef CONFIG_PAX_EMUTRAMP
66577+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
66578+ pax_flags |= MF_PAX_EMUTRAMP;
66579+#endif
66580+
66581+#ifdef CONFIG_PAX_MPROTECT
66582+ if (elf_phdata->p_flags & PF_MPROTECT)
66583+ pax_flags |= MF_PAX_MPROTECT;
66584+#endif
66585+
66586+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
66587+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
66588+ pax_flags |= MF_PAX_RANDMMAP;
66589+#endif
66590+
66591+ return pax_flags;
66592+}
66593+#endif
66594+
66595+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
66596+{
66597+ unsigned long pax_flags = 0UL;
66598+
66599+#ifdef CONFIG_PAX_PAGEEXEC
66600+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
66601+ pax_flags |= MF_PAX_PAGEEXEC;
66602+#endif
66603+
66604+#ifdef CONFIG_PAX_SEGMEXEC
66605+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
66606+ pax_flags |= MF_PAX_SEGMEXEC;
66607+#endif
66608+
66609+#ifdef CONFIG_PAX_EMUTRAMP
66610+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
66611+ pax_flags |= MF_PAX_EMUTRAMP;
66612+#endif
66613+
66614+#ifdef CONFIG_PAX_MPROTECT
66615+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
66616+ pax_flags |= MF_PAX_MPROTECT;
66617+#endif
66618+
66619+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
66620+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
66621+ pax_flags |= MF_PAX_RANDMMAP;
66622+#endif
66623+
66624+ return pax_flags;
66625+}
66626+#endif
66627+
66628+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
66629+#ifdef CONFIG_PAX_SOFTMODE
66630+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
66631+{
66632+ unsigned long pax_flags = 0UL;
66633+
66634+#ifdef CONFIG_PAX_PAGEEXEC
66635+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
66636+ pax_flags |= MF_PAX_PAGEEXEC;
66637+#endif
66638+
66639+#ifdef CONFIG_PAX_SEGMEXEC
66640+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
66641+ pax_flags |= MF_PAX_SEGMEXEC;
66642+#endif
66643+
66644+#ifdef CONFIG_PAX_EMUTRAMP
66645+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
66646+ pax_flags |= MF_PAX_EMUTRAMP;
66647+#endif
66648+
66649+#ifdef CONFIG_PAX_MPROTECT
66650+ if (pax_flags_softmode & MF_PAX_MPROTECT)
66651+ pax_flags |= MF_PAX_MPROTECT;
66652+#endif
66653+
66654+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
66655+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
66656+ pax_flags |= MF_PAX_RANDMMAP;
66657+#endif
66658+
66659+ return pax_flags;
66660+}
66661+#endif
66662+
66663+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
66664+{
66665+ unsigned long pax_flags = 0UL;
66666+
66667+#ifdef CONFIG_PAX_PAGEEXEC
66668+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
66669+ pax_flags |= MF_PAX_PAGEEXEC;
66670+#endif
66671+
66672+#ifdef CONFIG_PAX_SEGMEXEC
66673+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
66674+ pax_flags |= MF_PAX_SEGMEXEC;
66675+#endif
66676+
66677+#ifdef CONFIG_PAX_EMUTRAMP
66678+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
66679+ pax_flags |= MF_PAX_EMUTRAMP;
66680+#endif
66681+
66682+#ifdef CONFIG_PAX_MPROTECT
66683+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
66684+ pax_flags |= MF_PAX_MPROTECT;
66685+#endif
66686+
66687+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
66688+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
66689+ pax_flags |= MF_PAX_RANDMMAP;
66690+#endif
66691+
66692+ return pax_flags;
66693+}
66694+#endif
66695+
66696+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66697+static unsigned long pax_parse_defaults(void)
66698+{
66699+ unsigned long pax_flags = 0UL;
66700+
66701+#ifdef CONFIG_PAX_SOFTMODE
66702+ if (pax_softmode)
66703+ return pax_flags;
66704+#endif
66705+
66706+#ifdef CONFIG_PAX_PAGEEXEC
66707+ pax_flags |= MF_PAX_PAGEEXEC;
66708+#endif
66709+
66710+#ifdef CONFIG_PAX_SEGMEXEC
66711+ pax_flags |= MF_PAX_SEGMEXEC;
66712+#endif
66713+
66714+#ifdef CONFIG_PAX_MPROTECT
66715+ pax_flags |= MF_PAX_MPROTECT;
66716+#endif
66717+
66718+#ifdef CONFIG_PAX_RANDMMAP
66719+ if (randomize_va_space)
66720+ pax_flags |= MF_PAX_RANDMMAP;
66721+#endif
66722+
66723+ return pax_flags;
66724+}
66725+
66726+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
66727+{
66728+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
66729+
66730+#ifdef CONFIG_PAX_EI_PAX
66731+
66732+#ifdef CONFIG_PAX_SOFTMODE
66733+ if (pax_softmode)
66734+ return pax_flags;
66735+#endif
66736+
66737+ pax_flags = 0UL;
66738+
66739+#ifdef CONFIG_PAX_PAGEEXEC
66740+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
66741+ pax_flags |= MF_PAX_PAGEEXEC;
66742+#endif
66743+
66744+#ifdef CONFIG_PAX_SEGMEXEC
66745+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
66746+ pax_flags |= MF_PAX_SEGMEXEC;
66747+#endif
66748+
66749+#ifdef CONFIG_PAX_EMUTRAMP
66750+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
66751+ pax_flags |= MF_PAX_EMUTRAMP;
66752+#endif
66753+
66754+#ifdef CONFIG_PAX_MPROTECT
66755+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
66756+ pax_flags |= MF_PAX_MPROTECT;
66757+#endif
66758+
66759+#ifdef CONFIG_PAX_ASLR
66760+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
66761+ pax_flags |= MF_PAX_RANDMMAP;
66762+#endif
66763+
66764+#endif
66765+
66766+ return pax_flags;
66767+
66768+}
66769+
66770+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
66771+{
66772+
66773+#ifdef CONFIG_PAX_PT_PAX_FLAGS
66774+ unsigned long i;
66775+
66776+ for (i = 0UL; i < elf_ex->e_phnum; i++)
66777+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
66778+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
66779+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
66780+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
66781+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
66782+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
66783+ return PAX_PARSE_FLAGS_FALLBACK;
66784+
66785+#ifdef CONFIG_PAX_SOFTMODE
66786+ if (pax_softmode)
66787+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
66788+ else
66789+#endif
66790+
66791+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
66792+ break;
66793+ }
66794+#endif
66795+
66796+ return PAX_PARSE_FLAGS_FALLBACK;
66797+}
66798+
66799+static unsigned long pax_parse_xattr_pax(struct file * const file)
66800+{
66801+
66802+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
66803+ ssize_t xattr_size, i;
66804+ unsigned char xattr_value[sizeof("pemrs") - 1];
66805+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
66806+
66807+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
66808+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
66809+ return PAX_PARSE_FLAGS_FALLBACK;
66810+
66811+ for (i = 0; i < xattr_size; i++)
66812+ switch (xattr_value[i]) {
66813+ default:
66814+ return PAX_PARSE_FLAGS_FALLBACK;
66815+
66816+#define parse_flag(option1, option2, flag) \
66817+ case option1: \
66818+ if (pax_flags_hardmode & MF_PAX_##flag) \
66819+ return PAX_PARSE_FLAGS_FALLBACK;\
66820+ pax_flags_hardmode |= MF_PAX_##flag; \
66821+ break; \
66822+ case option2: \
66823+ if (pax_flags_softmode & MF_PAX_##flag) \
66824+ return PAX_PARSE_FLAGS_FALLBACK;\
66825+ pax_flags_softmode |= MF_PAX_##flag; \
66826+ break;
66827+
66828+ parse_flag('p', 'P', PAGEEXEC);
66829+ parse_flag('e', 'E', EMUTRAMP);
66830+ parse_flag('m', 'M', MPROTECT);
66831+ parse_flag('r', 'R', RANDMMAP);
66832+ parse_flag('s', 'S', SEGMEXEC);
66833+
66834+#undef parse_flag
66835+ }
66836+
66837+ if (pax_flags_hardmode & pax_flags_softmode)
66838+ return PAX_PARSE_FLAGS_FALLBACK;
66839+
66840+#ifdef CONFIG_PAX_SOFTMODE
66841+ if (pax_softmode)
66842+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
66843+ else
66844+#endif
66845+
66846+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
66847+#else
66848+ return PAX_PARSE_FLAGS_FALLBACK;
66849+#endif
66850+
66851+}
66852+
66853+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
66854+{
66855+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
66856+
66857+ pax_flags = pax_parse_defaults();
66858+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
66859+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
66860+ xattr_pax_flags = pax_parse_xattr_pax(file);
66861+
66862+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
66863+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
66864+ pt_pax_flags != xattr_pax_flags)
66865+ return -EINVAL;
66866+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
66867+ pax_flags = xattr_pax_flags;
66868+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
66869+ pax_flags = pt_pax_flags;
66870+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
66871+ pax_flags = ei_pax_flags;
66872+
66873+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
66874+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
66875+ if ((__supported_pte_mask & _PAGE_NX))
66876+ pax_flags &= ~MF_PAX_SEGMEXEC;
66877+ else
66878+ pax_flags &= ~MF_PAX_PAGEEXEC;
66879+ }
66880+#endif
66881+
66882+ if (0 > pax_check_flags(&pax_flags))
66883+ return -EINVAL;
66884+
66885+ current->mm->pax_flags = pax_flags;
66886+ return 0;
66887+}
66888+#endif
66889+
66890 /*
66891 * These are the functions used to load ELF style executables and shared
66892 * libraries. There is no binary dependent code anywhere else.
66893@@ -647,6 +1009,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
66894 {
66895 unsigned long random_variable = 0;
66896
66897+#ifdef CONFIG_PAX_RANDUSTACK
66898+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
66899+ return stack_top - current->mm->delta_stack;
66900+#endif
66901+
66902 if ((current->flags & PF_RANDOMIZE) &&
66903 !(current->personality & ADDR_NO_RANDOMIZE)) {
66904 random_variable = (unsigned long) get_random_int();
66905@@ -666,7 +1033,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
66906 unsigned long load_addr = 0, load_bias = 0;
66907 int load_addr_set = 0;
66908 char * elf_interpreter = NULL;
66909- unsigned long error;
66910+ unsigned long error = 0;
66911 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
66912 unsigned long elf_bss, elf_brk;
66913 int retval, i;
66914@@ -681,6 +1048,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
66915 struct elfhdr interp_elf_ex;
66916 } *loc;
66917 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
66918+ unsigned long pax_task_size;
66919
66920 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
66921 if (!loc) {
66922@@ -839,6 +1207,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
66923 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
66924 may depend on the personality. */
66925 SET_PERSONALITY2(loc->elf_ex, &arch_state);
66926+
66927+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66928+ current->mm->pax_flags = 0UL;
66929+#endif
66930+
66931+#ifdef CONFIG_PAX_DLRESOLVE
66932+ current->mm->call_dl_resolve = 0UL;
66933+#endif
66934+
66935+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
66936+ current->mm->call_syscall = 0UL;
66937+#endif
66938+
66939+#ifdef CONFIG_PAX_ASLR
66940+ current->mm->delta_mmap = 0UL;
66941+ current->mm->delta_stack = 0UL;
66942+#endif
66943+
66944+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
66945+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
66946+ send_sig(SIGKILL, current, 0);
66947+ goto out_free_dentry;
66948+ }
66949+#endif
66950+
66951+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
66952+ pax_set_initial_flags(bprm);
66953+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
66954+ if (pax_set_initial_flags_func)
66955+ (pax_set_initial_flags_func)(bprm);
66956+#endif
66957+
66958+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66959+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
66960+ current->mm->context.user_cs_limit = PAGE_SIZE;
66961+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
66962+ }
66963+#endif
66964+
66965+#ifdef CONFIG_PAX_SEGMEXEC
66966+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
66967+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
66968+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
66969+ pax_task_size = SEGMEXEC_TASK_SIZE;
66970+ current->mm->def_flags |= VM_NOHUGEPAGE;
66971+ } else
66972+#endif
66973+
66974+ pax_task_size = TASK_SIZE;
66975+
66976+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
66977+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
66978+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
66979+ put_cpu();
66980+ }
66981+#endif
66982+
66983+#ifdef CONFIG_PAX_ASLR
66984+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
66985+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
66986+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
66987+ }
66988+#endif
66989+
66990+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
66991+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
66992+ executable_stack = EXSTACK_DISABLE_X;
66993+ current->personality &= ~READ_IMPLIES_EXEC;
66994+ } else
66995+#endif
66996+
66997 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
66998 current->personality |= READ_IMPLIES_EXEC;
66999
67000@@ -925,8 +1364,21 @@ static int load_elf_binary(struct linux_binprm *bprm)
67001 #else
67002 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
67003 #endif
67004- total_size = total_mapping_size(elf_phdata,
67005- loc->elf_ex.e_phnum);
67006+
67007+#ifdef CONFIG_PAX_RANDMMAP
67008+ /* PaX: randomize base address at the default exe base if requested */
67009+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
67010+#ifdef CONFIG_SPARC64
67011+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
67012+#else
67013+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
67014+#endif
67015+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
67016+ elf_flags |= MAP_FIXED;
67017+ }
67018+#endif
67019+
67020+ total_size = total_mapping_size(elf_phdata, loc->elf_ex.e_phnum);
67021 if (!total_size) {
67022 retval = -EINVAL;
67023 goto out_free_dentry;
67024@@ -962,9 +1414,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
67025 * allowed task size. Note that p_filesz must always be
67026 * <= p_memsz so it is only necessary to check p_memsz.
67027 */
67028- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
67029- elf_ppnt->p_memsz > TASK_SIZE ||
67030- TASK_SIZE - elf_ppnt->p_memsz < k) {
67031+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
67032+ elf_ppnt->p_memsz > pax_task_size ||
67033+ pax_task_size - elf_ppnt->p_memsz < k) {
67034 /* set_brk can never work. Avoid overflows. */
67035 retval = -EINVAL;
67036 goto out_free_dentry;
67037@@ -1000,16 +1452,43 @@ static int load_elf_binary(struct linux_binprm *bprm)
67038 if (retval)
67039 goto out_free_dentry;
67040 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
67041- retval = -EFAULT; /* Nobody gets to see this, but.. */
67042- goto out_free_dentry;
67043+ /*
67044+ * This bss-zeroing can fail if the ELF
67045+ * file specifies odd protections. So
67046+ * we don't check the return value
67047+ */
67048 }
67049
67050+#ifdef CONFIG_PAX_RANDMMAP
67051+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
67052+ unsigned long start, size, flags;
67053+ vm_flags_t vm_flags;
67054+
67055+ start = ELF_PAGEALIGN(elf_brk);
67056+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
67057+ flags = MAP_FIXED | MAP_PRIVATE;
67058+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
67059+
67060+ down_write(&current->mm->mmap_sem);
67061+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
67062+ retval = -ENOMEM;
67063+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
67064+// if (current->personality & ADDR_NO_RANDOMIZE)
67065+// vm_flags |= VM_READ | VM_MAYREAD;
67066+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
67067+ retval = IS_ERR_VALUE(start) ? start : 0;
67068+ }
67069+ up_write(&current->mm->mmap_sem);
67070+ if (retval == 0)
67071+ retval = set_brk(start + size, start + size + PAGE_SIZE);
67072+ if (retval < 0)
67073+ goto out_free_dentry;
67074+ }
67075+#endif
67076+
67077 if (elf_interpreter) {
67078- unsigned long interp_map_addr = 0;
67079-
67080 elf_entry = load_elf_interp(&loc->interp_elf_ex,
67081 interpreter,
67082- &interp_map_addr,
67083 load_bias, interp_elf_phdata);
67084 if (!IS_ERR((void *)elf_entry)) {
67085 /*
67086@@ -1237,7 +1716,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
67087 * Decide what to dump of a segment, part, all or none.
67088 */
67089 static unsigned long vma_dump_size(struct vm_area_struct *vma,
67090- unsigned long mm_flags)
67091+ unsigned long mm_flags, long signr)
67092 {
67093 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
67094
67095@@ -1275,7 +1754,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
67096 if (vma->vm_file == NULL)
67097 return 0;
67098
67099- if (FILTER(MAPPED_PRIVATE))
67100+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
67101 goto whole;
67102
67103 /*
67104@@ -1482,9 +1961,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
67105 {
67106 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
67107 int i = 0;
67108- do
67109+ do {
67110 i += 2;
67111- while (auxv[i - 2] != AT_NULL);
67112+ } while (auxv[i - 2] != AT_NULL);
67113 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
67114 }
67115
67116@@ -1493,7 +1972,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
67117 {
67118 mm_segment_t old_fs = get_fs();
67119 set_fs(KERNEL_DS);
67120- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
67121+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
67122 set_fs(old_fs);
67123 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
67124 }
67125@@ -2213,7 +2692,7 @@ static int elf_core_dump(struct coredump_params *cprm)
67126 vma = next_vma(vma, gate_vma)) {
67127 unsigned long dump_size;
67128
67129- dump_size = vma_dump_size(vma, cprm->mm_flags);
67130+ dump_size = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
67131 vma_filesz[i++] = dump_size;
67132 vma_data_size += dump_size;
67133 }
67134@@ -2321,6 +2800,167 @@ out:
67135
67136 #endif /* CONFIG_ELF_CORE */
67137
67138+#ifdef CONFIG_PAX_MPROTECT
67139+/* PaX: non-PIC ELF libraries need relocations on their executable segments
67140+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
67141+ * we'll remove VM_MAYWRITE for good on RELRO segments.
67142+ *
67143+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
67144+ * basis because we want to allow the common case and not the special ones.
67145+ */
67146+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
67147+{
67148+ struct elfhdr elf_h;
67149+ struct elf_phdr elf_p;
67150+ unsigned long i;
67151+ unsigned long oldflags;
67152+ bool is_textrel_rw, is_textrel_rx, is_relro;
67153+
67154+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
67155+ return;
67156+
67157+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
67158+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
67159+
67160+#ifdef CONFIG_PAX_ELFRELOCS
67161+ /* possible TEXTREL */
67162+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
67163+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
67164+#else
67165+ is_textrel_rw = false;
67166+ is_textrel_rx = false;
67167+#endif
67168+
67169+ /* possible RELRO */
67170+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
67171+
67172+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
67173+ return;
67174+
67175+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
67176+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
67177+
67178+#ifdef CONFIG_PAX_ETEXECRELOCS
67179+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
67180+#else
67181+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
67182+#endif
67183+
67184+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
67185+ !elf_check_arch(&elf_h) ||
67186+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
67187+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
67188+ return;
67189+
67190+ for (i = 0UL; i < elf_h.e_phnum; i++) {
67191+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
67192+ return;
67193+ switch (elf_p.p_type) {
67194+ case PT_DYNAMIC:
67195+ if (!is_textrel_rw && !is_textrel_rx)
67196+ continue;
67197+ i = 0UL;
67198+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
67199+ elf_dyn dyn;
67200+
67201+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
67202+ break;
67203+ if (dyn.d_tag == DT_NULL)
67204+ break;
67205+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
67206+ gr_log_textrel(vma);
67207+ if (is_textrel_rw)
67208+ vma->vm_flags |= VM_MAYWRITE;
67209+ else
67210+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
67211+ vma->vm_flags &= ~VM_MAYWRITE;
67212+ break;
67213+ }
67214+ i++;
67215+ }
67216+ is_textrel_rw = false;
67217+ is_textrel_rx = false;
67218+ continue;
67219+
67220+ case PT_GNU_RELRO:
67221+ if (!is_relro)
67222+ continue;
67223+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
67224+ vma->vm_flags &= ~VM_MAYWRITE;
67225+ is_relro = false;
67226+ continue;
67227+
67228+#ifdef CONFIG_PAX_PT_PAX_FLAGS
67229+ case PT_PAX_FLAGS: {
67230+ const char *msg_mprotect = "", *msg_emutramp = "";
67231+ char *buffer_lib, *buffer_exe;
67232+
67233+ if (elf_p.p_flags & PF_NOMPROTECT)
67234+ msg_mprotect = "MPROTECT disabled";
67235+
67236+#ifdef CONFIG_PAX_EMUTRAMP
67237+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
67238+ msg_emutramp = "EMUTRAMP enabled";
67239+#endif
67240+
67241+ if (!msg_mprotect[0] && !msg_emutramp[0])
67242+ continue;
67243+
67244+ if (!printk_ratelimit())
67245+ continue;
67246+
67247+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
67248+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
67249+ if (buffer_lib && buffer_exe) {
67250+ char *path_lib, *path_exe;
67251+
67252+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
67253+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
67254+
67255+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
67256+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
67257+
67258+ }
67259+ free_page((unsigned long)buffer_exe);
67260+ free_page((unsigned long)buffer_lib);
67261+ continue;
67262+ }
67263+#endif
67264+
67265+ }
67266+ }
67267+}
67268+#endif
67269+
67270+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
67271+
67272+extern int grsec_enable_log_rwxmaps;
67273+
67274+static void elf_handle_mmap(struct file *file)
67275+{
67276+ struct elfhdr elf_h;
67277+ struct elf_phdr elf_p;
67278+ unsigned long i;
67279+
67280+ if (!grsec_enable_log_rwxmaps)
67281+ return;
67282+
67283+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
67284+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
67285+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
67286+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
67287+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
67288+ return;
67289+
67290+ for (i = 0UL; i < elf_h.e_phnum; i++) {
67291+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
67292+ return;
67293+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
67294+ gr_log_ptgnustack(file);
67295+ }
67296+}
67297+#endif
67298+
67299 static int __init init_elf_binfmt(void)
67300 {
67301 register_binfmt(&elf_format);
67302diff --git a/fs/block_dev.c b/fs/block_dev.c
67303index 975266b..c3d1856 100644
67304--- a/fs/block_dev.c
67305+++ b/fs/block_dev.c
67306@@ -734,7 +734,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
67307 else if (bdev->bd_contains == bdev)
67308 return true; /* is a whole device which isn't held */
67309
67310- else if (whole->bd_holder == bd_may_claim)
67311+ else if (whole->bd_holder == (void *)bd_may_claim)
67312 return true; /* is a partition of a device that is being partitioned */
67313 else if (whole->bd_holder != NULL)
67314 return false; /* is a partition of a held device */
67315diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
67316index 6d67f32..8f33187 100644
67317--- a/fs/btrfs/ctree.c
67318+++ b/fs/btrfs/ctree.c
67319@@ -1181,9 +1181,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
67320 free_extent_buffer(buf);
67321 add_root_to_dirty_list(root);
67322 } else {
67323- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
67324- parent_start = parent->start;
67325- else
67326+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
67327+ if (parent)
67328+ parent_start = parent->start;
67329+ else
67330+ parent_start = 0;
67331+ } else
67332 parent_start = 0;
67333
67334 WARN_ON(trans->transid != btrfs_header_generation(parent));
67335diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
67336index 82f0c7c..dff78a8 100644
67337--- a/fs/btrfs/delayed-inode.c
67338+++ b/fs/btrfs/delayed-inode.c
67339@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
67340
67341 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
67342 {
67343- int seq = atomic_inc_return(&delayed_root->items_seq);
67344+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
67345 if ((atomic_dec_return(&delayed_root->items) <
67346 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
67347 waitqueue_active(&delayed_root->wait))
67348@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
67349
67350 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
67351 {
67352- int val = atomic_read(&delayed_root->items_seq);
67353+ int val = atomic_read_unchecked(&delayed_root->items_seq);
67354
67355 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
67356 return 1;
67357@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
67358 int seq;
67359 int ret;
67360
67361- seq = atomic_read(&delayed_root->items_seq);
67362+ seq = atomic_read_unchecked(&delayed_root->items_seq);
67363
67364 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
67365 if (ret)
67366diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
67367index f70119f..ab5894d 100644
67368--- a/fs/btrfs/delayed-inode.h
67369+++ b/fs/btrfs/delayed-inode.h
67370@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
67371 */
67372 struct list_head prepare_list;
67373 atomic_t items; /* for delayed items */
67374- atomic_t items_seq; /* for delayed items */
67375+ atomic_unchecked_t items_seq; /* for delayed items */
67376 int nodes; /* for delayed nodes */
67377 wait_queue_head_t wait;
67378 };
67379@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
67380 struct btrfs_delayed_root *delayed_root)
67381 {
67382 atomic_set(&delayed_root->items, 0);
67383- atomic_set(&delayed_root->items_seq, 0);
67384+ atomic_set_unchecked(&delayed_root->items_seq, 0);
67385 delayed_root->nodes = 0;
67386 spin_lock_init(&delayed_root->lock);
67387 init_waitqueue_head(&delayed_root->wait);
67388diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
67389index e477ed6..480c0db 100644
67390--- a/fs/btrfs/super.c
67391+++ b/fs/btrfs/super.c
67392@@ -271,7 +271,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
67393 function, line, errstr);
67394 return;
67395 }
67396- ACCESS_ONCE(trans->transaction->aborted) = errno;
67397+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
67398 /* Wake up anybody who may be waiting on this transaction */
67399 wake_up(&root->fs_info->transaction_wait);
67400 wake_up(&root->fs_info->transaction_blocked_wait);
67401diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
67402index 94edb0a..e94dc93 100644
67403--- a/fs/btrfs/sysfs.c
67404+++ b/fs/btrfs/sysfs.c
67405@@ -472,7 +472,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
67406 for (set = 0; set < FEAT_MAX; set++) {
67407 int i;
67408 struct attribute *attrs[2];
67409- struct attribute_group agroup = {
67410+ attribute_group_no_const agroup = {
67411 .name = "features",
67412 .attrs = attrs,
67413 };
67414diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
67415index 2299bfd..4098e72 100644
67416--- a/fs/btrfs/tests/free-space-tests.c
67417+++ b/fs/btrfs/tests/free-space-tests.c
67418@@ -463,7 +463,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
67419 * extent entry.
67420 */
67421 use_bitmap_op = cache->free_space_ctl->op->use_bitmap;
67422- cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
67423+ pax_open_kernel();
67424+ *(void **)&cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
67425+ pax_close_kernel();
67426
67427 /*
67428 * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
67429@@ -870,7 +872,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
67430 if (ret)
67431 return ret;
67432
67433- cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
67434+ pax_open_kernel();
67435+ *(void **)&cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
67436+ pax_close_kernel();
67437 __btrfs_remove_free_space_cache(cache->free_space_ctl);
67438
67439 return 0;
67440diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
67441index 154990c..d0cf699 100644
67442--- a/fs/btrfs/tree-log.h
67443+++ b/fs/btrfs/tree-log.h
67444@@ -43,7 +43,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
67445 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
67446 struct btrfs_trans_handle *trans)
67447 {
67448- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
67449+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
67450 }
67451
67452 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
67453diff --git a/fs/buffer.c b/fs/buffer.c
67454index 20805db..2e8fc69 100644
67455--- a/fs/buffer.c
67456+++ b/fs/buffer.c
67457@@ -3417,7 +3417,7 @@ void __init buffer_init(void)
67458 bh_cachep = kmem_cache_create("buffer_head",
67459 sizeof(struct buffer_head), 0,
67460 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
67461- SLAB_MEM_SPREAD),
67462+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
67463 NULL);
67464
67465 /*
67466diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
67467index fbb08e9..0fda764 100644
67468--- a/fs/cachefiles/bind.c
67469+++ b/fs/cachefiles/bind.c
67470@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
67471 args);
67472
67473 /* start by checking things over */
67474- ASSERT(cache->fstop_percent >= 0 &&
67475- cache->fstop_percent < cache->fcull_percent &&
67476+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
67477 cache->fcull_percent < cache->frun_percent &&
67478 cache->frun_percent < 100);
67479
67480- ASSERT(cache->bstop_percent >= 0 &&
67481- cache->bstop_percent < cache->bcull_percent &&
67482+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
67483 cache->bcull_percent < cache->brun_percent &&
67484 cache->brun_percent < 100);
67485
67486diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
67487index f601def..b2cf704 100644
67488--- a/fs/cachefiles/daemon.c
67489+++ b/fs/cachefiles/daemon.c
67490@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
67491 if (n > buflen)
67492 return -EMSGSIZE;
67493
67494- if (copy_to_user(_buffer, buffer, n) != 0)
67495+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
67496 return -EFAULT;
67497
67498 return n;
67499@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
67500 if (test_bit(CACHEFILES_DEAD, &cache->flags))
67501 return -EIO;
67502
67503- if (datalen < 0 || datalen > PAGE_SIZE - 1)
67504+ if (datalen > PAGE_SIZE - 1)
67505 return -EOPNOTSUPP;
67506
67507 /* drag the command string into the kernel so we can parse it */
67508@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
67509 if (args[0] != '%' || args[1] != '\0')
67510 return -EINVAL;
67511
67512- if (fstop < 0 || fstop >= cache->fcull_percent)
67513+ if (fstop >= cache->fcull_percent)
67514 return cachefiles_daemon_range_error(cache, args);
67515
67516 cache->fstop_percent = fstop;
67517@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
67518 if (args[0] != '%' || args[1] != '\0')
67519 return -EINVAL;
67520
67521- if (bstop < 0 || bstop >= cache->bcull_percent)
67522+ if (bstop >= cache->bcull_percent)
67523 return cachefiles_daemon_range_error(cache, args);
67524
67525 cache->bstop_percent = bstop;
67526diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
67527index 8c52472..c4e3a69 100644
67528--- a/fs/cachefiles/internal.h
67529+++ b/fs/cachefiles/internal.h
67530@@ -66,7 +66,7 @@ struct cachefiles_cache {
67531 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
67532 struct rb_root active_nodes; /* active nodes (can't be culled) */
67533 rwlock_t active_lock; /* lock for active_nodes */
67534- atomic_t gravecounter; /* graveyard uniquifier */
67535+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
67536 unsigned frun_percent; /* when to stop culling (% files) */
67537 unsigned fcull_percent; /* when to start culling (% files) */
67538 unsigned fstop_percent; /* when to stop allocating (% files) */
67539@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
67540 * proc.c
67541 */
67542 #ifdef CONFIG_CACHEFILES_HISTOGRAM
67543-extern atomic_t cachefiles_lookup_histogram[HZ];
67544-extern atomic_t cachefiles_mkdir_histogram[HZ];
67545-extern atomic_t cachefiles_create_histogram[HZ];
67546+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
67547+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
67548+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
67549
67550 extern int __init cachefiles_proc_init(void);
67551 extern void cachefiles_proc_cleanup(void);
67552 static inline
67553-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
67554+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
67555 {
67556 unsigned long jif = jiffies - start_jif;
67557 if (jif >= HZ)
67558 jif = HZ - 1;
67559- atomic_inc(&histogram[jif]);
67560+ atomic_inc_unchecked(&histogram[jif]);
67561 }
67562
67563 #else
67564diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
67565index 1e51714e..411eded 100644
67566--- a/fs/cachefiles/namei.c
67567+++ b/fs/cachefiles/namei.c
67568@@ -309,7 +309,7 @@ try_again:
67569 /* first step is to make up a grave dentry in the graveyard */
67570 sprintf(nbuffer, "%08x%08x",
67571 (uint32_t) get_seconds(),
67572- (uint32_t) atomic_inc_return(&cache->gravecounter));
67573+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
67574
67575 /* do the multiway lock magic */
67576 trap = lock_rename(cache->graveyard, dir);
67577diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
67578index eccd339..4c1d995 100644
67579--- a/fs/cachefiles/proc.c
67580+++ b/fs/cachefiles/proc.c
67581@@ -14,9 +14,9 @@
67582 #include <linux/seq_file.h>
67583 #include "internal.h"
67584
67585-atomic_t cachefiles_lookup_histogram[HZ];
67586-atomic_t cachefiles_mkdir_histogram[HZ];
67587-atomic_t cachefiles_create_histogram[HZ];
67588+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
67589+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
67590+atomic_unchecked_t cachefiles_create_histogram[HZ];
67591
67592 /*
67593 * display the latency histogram
67594@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
67595 return 0;
67596 default:
67597 index = (unsigned long) v - 3;
67598- x = atomic_read(&cachefiles_lookup_histogram[index]);
67599- y = atomic_read(&cachefiles_mkdir_histogram[index]);
67600- z = atomic_read(&cachefiles_create_histogram[index]);
67601+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
67602+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
67603+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
67604 if (x == 0 && y == 0 && z == 0)
67605 return 0;
67606
67607diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
67608index 83e9976..bfd1eee 100644
67609--- a/fs/ceph/dir.c
67610+++ b/fs/ceph/dir.c
67611@@ -127,6 +127,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
67612 struct dentry *dentry, *last;
67613 struct ceph_dentry_info *di;
67614 int err = 0;
67615+ char d_name[DNAME_INLINE_LEN];
67616+ const unsigned char *name;
67617
67618 /* claim ref on last dentry we returned */
67619 last = fi->dentry;
67620@@ -190,7 +192,12 @@ more:
67621
67622 dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
67623 dentry, dentry, dentry->d_inode);
67624- if (!dir_emit(ctx, dentry->d_name.name,
67625+ name = dentry->d_name.name;
67626+ if (name == dentry->d_iname) {
67627+ memcpy(d_name, name, dentry->d_name.len);
67628+ name = d_name;
67629+ }
67630+ if (!dir_emit(ctx, name,
67631 dentry->d_name.len,
67632 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
67633 dentry->d_inode->i_mode >> 12)) {
67634@@ -248,7 +255,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
67635 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
67636 struct ceph_mds_client *mdsc = fsc->mdsc;
67637 unsigned frag = fpos_frag(ctx->pos);
67638- int off = fpos_off(ctx->pos);
67639+ unsigned int off = fpos_off(ctx->pos);
67640 int err;
67641 u32 ftype;
67642 struct ceph_mds_reply_info_parsed *rinfo;
67643diff --git a/fs/ceph/super.c b/fs/ceph/super.c
67644index a63997b..ddc0577 100644
67645--- a/fs/ceph/super.c
67646+++ b/fs/ceph/super.c
67647@@ -889,7 +889,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
67648 /*
67649 * construct our own bdi so we can control readahead, etc.
67650 */
67651-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
67652+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
67653
67654 static int ceph_register_bdi(struct super_block *sb,
67655 struct ceph_fs_client *fsc)
67656@@ -906,7 +906,7 @@ static int ceph_register_bdi(struct super_block *sb,
67657 VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
67658
67659 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
67660- atomic_long_inc_return(&bdi_seq));
67661+ atomic_long_inc_return_unchecked(&bdi_seq));
67662 if (!err)
67663 sb->s_bdi = &fsc->backing_dev_info;
67664 return err;
67665diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
67666index 7febcf2..62a5721 100644
67667--- a/fs/cifs/cifs_debug.c
67668+++ b/fs/cifs/cifs_debug.c
67669@@ -269,8 +269,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
67670
67671 if (strtobool(&c, &bv) == 0) {
67672 #ifdef CONFIG_CIFS_STATS2
67673- atomic_set(&totBufAllocCount, 0);
67674- atomic_set(&totSmBufAllocCount, 0);
67675+ atomic_set_unchecked(&totBufAllocCount, 0);
67676+ atomic_set_unchecked(&totSmBufAllocCount, 0);
67677 #endif /* CONFIG_CIFS_STATS2 */
67678 spin_lock(&cifs_tcp_ses_lock);
67679 list_for_each(tmp1, &cifs_tcp_ses_list) {
67680@@ -283,7 +283,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
67681 tcon = list_entry(tmp3,
67682 struct cifs_tcon,
67683 tcon_list);
67684- atomic_set(&tcon->num_smbs_sent, 0);
67685+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
67686 if (server->ops->clear_stats)
67687 server->ops->clear_stats(tcon);
67688 }
67689@@ -315,8 +315,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
67690 smBufAllocCount.counter, cifs_min_small);
67691 #ifdef CONFIG_CIFS_STATS2
67692 seq_printf(m, "Total Large %d Small %d Allocations\n",
67693- atomic_read(&totBufAllocCount),
67694- atomic_read(&totSmBufAllocCount));
67695+ atomic_read_unchecked(&totBufAllocCount),
67696+ atomic_read_unchecked(&totSmBufAllocCount));
67697 #endif /* CONFIG_CIFS_STATS2 */
67698
67699 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
67700@@ -345,7 +345,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
67701 if (tcon->need_reconnect)
67702 seq_puts(m, "\tDISCONNECTED ");
67703 seq_printf(m, "\nSMBs: %d",
67704- atomic_read(&tcon->num_smbs_sent));
67705+ atomic_read_unchecked(&tcon->num_smbs_sent));
67706 if (server->ops->print_stats)
67707 server->ops->print_stats(m, tcon);
67708 }
67709diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
67710index d72fe37..ded5511 100644
67711--- a/fs/cifs/cifsfs.c
67712+++ b/fs/cifs/cifsfs.c
67713@@ -1092,7 +1092,7 @@ cifs_init_request_bufs(void)
67714 */
67715 cifs_req_cachep = kmem_cache_create("cifs_request",
67716 CIFSMaxBufSize + max_hdr_size, 0,
67717- SLAB_HWCACHE_ALIGN, NULL);
67718+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
67719 if (cifs_req_cachep == NULL)
67720 return -ENOMEM;
67721
67722@@ -1119,7 +1119,7 @@ cifs_init_request_bufs(void)
67723 efficient to alloc 1 per page off the slab compared to 17K (5page)
67724 alloc of large cifs buffers even when page debugging is on */
67725 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
67726- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
67727+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
67728 NULL);
67729 if (cifs_sm_req_cachep == NULL) {
67730 mempool_destroy(cifs_req_poolp);
67731@@ -1204,8 +1204,8 @@ init_cifs(void)
67732 atomic_set(&bufAllocCount, 0);
67733 atomic_set(&smBufAllocCount, 0);
67734 #ifdef CONFIG_CIFS_STATS2
67735- atomic_set(&totBufAllocCount, 0);
67736- atomic_set(&totSmBufAllocCount, 0);
67737+ atomic_set_unchecked(&totBufAllocCount, 0);
67738+ atomic_set_unchecked(&totSmBufAllocCount, 0);
67739 #endif /* CONFIG_CIFS_STATS2 */
67740
67741 atomic_set(&midCount, 0);
67742diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
67743index 22b289a..bbbba082 100644
67744--- a/fs/cifs/cifsglob.h
67745+++ b/fs/cifs/cifsglob.h
67746@@ -823,35 +823,35 @@ struct cifs_tcon {
67747 __u16 Flags; /* optional support bits */
67748 enum statusEnum tidStatus;
67749 #ifdef CONFIG_CIFS_STATS
67750- atomic_t num_smbs_sent;
67751+ atomic_unchecked_t num_smbs_sent;
67752 union {
67753 struct {
67754- atomic_t num_writes;
67755- atomic_t num_reads;
67756- atomic_t num_flushes;
67757- atomic_t num_oplock_brks;
67758- atomic_t num_opens;
67759- atomic_t num_closes;
67760- atomic_t num_deletes;
67761- atomic_t num_mkdirs;
67762- atomic_t num_posixopens;
67763- atomic_t num_posixmkdirs;
67764- atomic_t num_rmdirs;
67765- atomic_t num_renames;
67766- atomic_t num_t2renames;
67767- atomic_t num_ffirst;
67768- atomic_t num_fnext;
67769- atomic_t num_fclose;
67770- atomic_t num_hardlinks;
67771- atomic_t num_symlinks;
67772- atomic_t num_locks;
67773- atomic_t num_acl_get;
67774- atomic_t num_acl_set;
67775+ atomic_unchecked_t num_writes;
67776+ atomic_unchecked_t num_reads;
67777+ atomic_unchecked_t num_flushes;
67778+ atomic_unchecked_t num_oplock_brks;
67779+ atomic_unchecked_t num_opens;
67780+ atomic_unchecked_t num_closes;
67781+ atomic_unchecked_t num_deletes;
67782+ atomic_unchecked_t num_mkdirs;
67783+ atomic_unchecked_t num_posixopens;
67784+ atomic_unchecked_t num_posixmkdirs;
67785+ atomic_unchecked_t num_rmdirs;
67786+ atomic_unchecked_t num_renames;
67787+ atomic_unchecked_t num_t2renames;
67788+ atomic_unchecked_t num_ffirst;
67789+ atomic_unchecked_t num_fnext;
67790+ atomic_unchecked_t num_fclose;
67791+ atomic_unchecked_t num_hardlinks;
67792+ atomic_unchecked_t num_symlinks;
67793+ atomic_unchecked_t num_locks;
67794+ atomic_unchecked_t num_acl_get;
67795+ atomic_unchecked_t num_acl_set;
67796 } cifs_stats;
67797 #ifdef CONFIG_CIFS_SMB2
67798 struct {
67799- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
67800- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
67801+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
67802+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
67803 } smb2_stats;
67804 #endif /* CONFIG_CIFS_SMB2 */
67805 } stats;
67806@@ -1198,7 +1198,7 @@ convert_delimiter(char *path, char delim)
67807 }
67808
67809 #ifdef CONFIG_CIFS_STATS
67810-#define cifs_stats_inc atomic_inc
67811+#define cifs_stats_inc atomic_inc_unchecked
67812
67813 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
67814 unsigned int bytes)
67815@@ -1565,8 +1565,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
67816 /* Various Debug counters */
67817 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
67818 #ifdef CONFIG_CIFS_STATS2
67819-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
67820-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
67821+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
67822+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
67823 #endif
67824 GLOBAL_EXTERN atomic_t smBufAllocCount;
67825 GLOBAL_EXTERN atomic_t midCount;
67826diff --git a/fs/cifs/file.c b/fs/cifs/file.c
67827index ca30c39..570fb94 100644
67828--- a/fs/cifs/file.c
67829+++ b/fs/cifs/file.c
67830@@ -2055,10 +2055,14 @@ static int cifs_writepages(struct address_space *mapping,
67831 index = mapping->writeback_index; /* Start from prev offset */
67832 end = -1;
67833 } else {
67834- index = wbc->range_start >> PAGE_CACHE_SHIFT;
67835- end = wbc->range_end >> PAGE_CACHE_SHIFT;
67836- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
67837+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
67838 range_whole = true;
67839+ index = 0;
67840+ end = ULONG_MAX;
67841+ } else {
67842+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
67843+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
67844+ }
67845 scanned = true;
67846 }
67847 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
67848diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
67849index 3379463..3af418a 100644
67850--- a/fs/cifs/misc.c
67851+++ b/fs/cifs/misc.c
67852@@ -170,7 +170,7 @@ cifs_buf_get(void)
67853 memset(ret_buf, 0, buf_size + 3);
67854 atomic_inc(&bufAllocCount);
67855 #ifdef CONFIG_CIFS_STATS2
67856- atomic_inc(&totBufAllocCount);
67857+ atomic_inc_unchecked(&totBufAllocCount);
67858 #endif /* CONFIG_CIFS_STATS2 */
67859 }
67860
67861@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
67862 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
67863 atomic_inc(&smBufAllocCount);
67864 #ifdef CONFIG_CIFS_STATS2
67865- atomic_inc(&totSmBufAllocCount);
67866+ atomic_inc_unchecked(&totSmBufAllocCount);
67867 #endif /* CONFIG_CIFS_STATS2 */
67868
67869 }
67870diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
67871index d297903..1cb7516 100644
67872--- a/fs/cifs/smb1ops.c
67873+++ b/fs/cifs/smb1ops.c
67874@@ -622,27 +622,27 @@ static void
67875 cifs_clear_stats(struct cifs_tcon *tcon)
67876 {
67877 #ifdef CONFIG_CIFS_STATS
67878- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
67879- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
67880- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
67881- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
67882- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
67883- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
67884- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
67885- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
67886- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
67887- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
67888- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
67889- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
67890- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
67891- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
67892- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
67893- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
67894- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
67895- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
67896- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
67897- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
67898- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
67899+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
67900+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
67901+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
67902+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
67903+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
67904+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
67905+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
67906+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
67907+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
67908+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
67909+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
67910+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
67911+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
67912+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
67913+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
67914+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
67915+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
67916+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
67917+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
67918+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
67919+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
67920 #endif
67921 }
67922
67923@@ -651,36 +651,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
67924 {
67925 #ifdef CONFIG_CIFS_STATS
67926 seq_printf(m, " Oplocks breaks: %d",
67927- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
67928+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
67929 seq_printf(m, "\nReads: %d Bytes: %llu",
67930- atomic_read(&tcon->stats.cifs_stats.num_reads),
67931+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
67932 (long long)(tcon->bytes_read));
67933 seq_printf(m, "\nWrites: %d Bytes: %llu",
67934- atomic_read(&tcon->stats.cifs_stats.num_writes),
67935+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
67936 (long long)(tcon->bytes_written));
67937 seq_printf(m, "\nFlushes: %d",
67938- atomic_read(&tcon->stats.cifs_stats.num_flushes));
67939+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
67940 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
67941- atomic_read(&tcon->stats.cifs_stats.num_locks),
67942- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
67943- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
67944+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
67945+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
67946+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
67947 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
67948- atomic_read(&tcon->stats.cifs_stats.num_opens),
67949- atomic_read(&tcon->stats.cifs_stats.num_closes),
67950- atomic_read(&tcon->stats.cifs_stats.num_deletes));
67951+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
67952+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
67953+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
67954 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
67955- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
67956- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
67957+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
67958+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
67959 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
67960- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
67961- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
67962+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
67963+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
67964 seq_printf(m, "\nRenames: %d T2 Renames %d",
67965- atomic_read(&tcon->stats.cifs_stats.num_renames),
67966- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
67967+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
67968+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
67969 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
67970- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
67971- atomic_read(&tcon->stats.cifs_stats.num_fnext),
67972- atomic_read(&tcon->stats.cifs_stats.num_fclose));
67973+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
67974+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
67975+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
67976 #endif
67977 }
67978
67979diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
67980index eab05e1..ffe5ea4 100644
67981--- a/fs/cifs/smb2ops.c
67982+++ b/fs/cifs/smb2ops.c
67983@@ -418,8 +418,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
67984 #ifdef CONFIG_CIFS_STATS
67985 int i;
67986 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
67987- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
67988- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
67989+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
67990+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
67991 }
67992 #endif
67993 }
67994@@ -459,65 +459,65 @@ static void
67995 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
67996 {
67997 #ifdef CONFIG_CIFS_STATS
67998- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
67999- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
68000+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
68001+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
68002 seq_printf(m, "\nNegotiates: %d sent %d failed",
68003- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
68004- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
68005+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
68006+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
68007 seq_printf(m, "\nSessionSetups: %d sent %d failed",
68008- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
68009- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
68010+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
68011+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
68012 seq_printf(m, "\nLogoffs: %d sent %d failed",
68013- atomic_read(&sent[SMB2_LOGOFF_HE]),
68014- atomic_read(&failed[SMB2_LOGOFF_HE]));
68015+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
68016+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
68017 seq_printf(m, "\nTreeConnects: %d sent %d failed",
68018- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
68019- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
68020+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
68021+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
68022 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
68023- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
68024- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
68025+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
68026+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
68027 seq_printf(m, "\nCreates: %d sent %d failed",
68028- atomic_read(&sent[SMB2_CREATE_HE]),
68029- atomic_read(&failed[SMB2_CREATE_HE]));
68030+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
68031+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
68032 seq_printf(m, "\nCloses: %d sent %d failed",
68033- atomic_read(&sent[SMB2_CLOSE_HE]),
68034- atomic_read(&failed[SMB2_CLOSE_HE]));
68035+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
68036+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
68037 seq_printf(m, "\nFlushes: %d sent %d failed",
68038- atomic_read(&sent[SMB2_FLUSH_HE]),
68039- atomic_read(&failed[SMB2_FLUSH_HE]));
68040+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
68041+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
68042 seq_printf(m, "\nReads: %d sent %d failed",
68043- atomic_read(&sent[SMB2_READ_HE]),
68044- atomic_read(&failed[SMB2_READ_HE]));
68045+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
68046+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
68047 seq_printf(m, "\nWrites: %d sent %d failed",
68048- atomic_read(&sent[SMB2_WRITE_HE]),
68049- atomic_read(&failed[SMB2_WRITE_HE]));
68050+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
68051+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
68052 seq_printf(m, "\nLocks: %d sent %d failed",
68053- atomic_read(&sent[SMB2_LOCK_HE]),
68054- atomic_read(&failed[SMB2_LOCK_HE]));
68055+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
68056+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
68057 seq_printf(m, "\nIOCTLs: %d sent %d failed",
68058- atomic_read(&sent[SMB2_IOCTL_HE]),
68059- atomic_read(&failed[SMB2_IOCTL_HE]));
68060+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
68061+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
68062 seq_printf(m, "\nCancels: %d sent %d failed",
68063- atomic_read(&sent[SMB2_CANCEL_HE]),
68064- atomic_read(&failed[SMB2_CANCEL_HE]));
68065+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
68066+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
68067 seq_printf(m, "\nEchos: %d sent %d failed",
68068- atomic_read(&sent[SMB2_ECHO_HE]),
68069- atomic_read(&failed[SMB2_ECHO_HE]));
68070+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
68071+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
68072 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
68073- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
68074- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
68075+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
68076+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
68077 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
68078- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
68079- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
68080+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
68081+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
68082 seq_printf(m, "\nQueryInfos: %d sent %d failed",
68083- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
68084- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
68085+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
68086+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
68087 seq_printf(m, "\nSetInfos: %d sent %d failed",
68088- atomic_read(&sent[SMB2_SET_INFO_HE]),
68089- atomic_read(&failed[SMB2_SET_INFO_HE]));
68090+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
68091+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
68092 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
68093- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
68094- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
68095+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
68096+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
68097 #endif
68098 }
68099
68100diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
68101index 65cd7a8..3518676 100644
68102--- a/fs/cifs/smb2pdu.c
68103+++ b/fs/cifs/smb2pdu.c
68104@@ -2147,8 +2147,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
68105 default:
68106 cifs_dbg(VFS, "info level %u isn't supported\n",
68107 srch_inf->info_level);
68108- rc = -EINVAL;
68109- goto qdir_exit;
68110+ return -EINVAL;
68111 }
68112
68113 req->FileIndex = cpu_to_le32(index);
68114diff --git a/fs/coda/cache.c b/fs/coda/cache.c
68115index 46ee6f2..89a9e7f 100644
68116--- a/fs/coda/cache.c
68117+++ b/fs/coda/cache.c
68118@@ -24,7 +24,7 @@
68119 #include "coda_linux.h"
68120 #include "coda_cache.h"
68121
68122-static atomic_t permission_epoch = ATOMIC_INIT(0);
68123+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
68124
68125 /* replace or extend an acl cache hit */
68126 void coda_cache_enter(struct inode *inode, int mask)
68127@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
68128 struct coda_inode_info *cii = ITOC(inode);
68129
68130 spin_lock(&cii->c_lock);
68131- cii->c_cached_epoch = atomic_read(&permission_epoch);
68132+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
68133 if (!uid_eq(cii->c_uid, current_fsuid())) {
68134 cii->c_uid = current_fsuid();
68135 cii->c_cached_perm = mask;
68136@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
68137 {
68138 struct coda_inode_info *cii = ITOC(inode);
68139 spin_lock(&cii->c_lock);
68140- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
68141+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
68142 spin_unlock(&cii->c_lock);
68143 }
68144
68145 /* remove all acl caches */
68146 void coda_cache_clear_all(struct super_block *sb)
68147 {
68148- atomic_inc(&permission_epoch);
68149+ atomic_inc_unchecked(&permission_epoch);
68150 }
68151
68152
68153@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
68154 spin_lock(&cii->c_lock);
68155 hit = (mask & cii->c_cached_perm) == mask &&
68156 uid_eq(cii->c_uid, current_fsuid()) &&
68157- cii->c_cached_epoch == atomic_read(&permission_epoch);
68158+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
68159 spin_unlock(&cii->c_lock);
68160
68161 return hit;
68162diff --git a/fs/compat.c b/fs/compat.c
68163index 6fd272d..dd34ba2 100644
68164--- a/fs/compat.c
68165+++ b/fs/compat.c
68166@@ -54,7 +54,7 @@
68167 #include <asm/ioctls.h>
68168 #include "internal.h"
68169
68170-int compat_log = 1;
68171+int compat_log = 0;
68172
68173 int compat_printk(const char *fmt, ...)
68174 {
68175@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
68176
68177 set_fs(KERNEL_DS);
68178 /* The __user pointer cast is valid because of the set_fs() */
68179- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
68180+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
68181 set_fs(oldfs);
68182 /* truncating is ok because it's a user address */
68183 if (!ret)
68184@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
68185 goto out;
68186
68187 ret = -EINVAL;
68188- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
68189+ if (nr_segs > UIO_MAXIOV)
68190 goto out;
68191 if (nr_segs > fast_segs) {
68192 ret = -ENOMEM;
68193@@ -844,6 +844,7 @@ struct compat_old_linux_dirent {
68194 struct compat_readdir_callback {
68195 struct dir_context ctx;
68196 struct compat_old_linux_dirent __user *dirent;
68197+ struct file * file;
68198 int result;
68199 };
68200
68201@@ -863,6 +864,10 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
68202 buf->result = -EOVERFLOW;
68203 return -EOVERFLOW;
68204 }
68205+
68206+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68207+ return 0;
68208+
68209 buf->result++;
68210 dirent = buf->dirent;
68211 if (!access_ok(VERIFY_WRITE, dirent,
68212@@ -894,6 +899,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
68213 if (!f.file)
68214 return -EBADF;
68215
68216+ buf.file = f.file;
68217 error = iterate_dir(f.file, &buf.ctx);
68218 if (buf.result)
68219 error = buf.result;
68220@@ -913,6 +919,7 @@ struct compat_getdents_callback {
68221 struct dir_context ctx;
68222 struct compat_linux_dirent __user *current_dir;
68223 struct compat_linux_dirent __user *previous;
68224+ struct file * file;
68225 int count;
68226 int error;
68227 };
68228@@ -935,6 +942,10 @@ static int compat_filldir(struct dir_context *ctx, const char *name, int namlen,
68229 buf->error = -EOVERFLOW;
68230 return -EOVERFLOW;
68231 }
68232+
68233+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68234+ return 0;
68235+
68236 dirent = buf->previous;
68237 if (dirent) {
68238 if (__put_user(offset, &dirent->d_off))
68239@@ -980,6 +991,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
68240 if (!f.file)
68241 return -EBADF;
68242
68243+ buf.file = f.file;
68244 error = iterate_dir(f.file, &buf.ctx);
68245 if (error >= 0)
68246 error = buf.error;
68247@@ -1000,6 +1012,7 @@ struct compat_getdents_callback64 {
68248 struct dir_context ctx;
68249 struct linux_dirent64 __user *current_dir;
68250 struct linux_dirent64 __user *previous;
68251+ struct file * file;
68252 int count;
68253 int error;
68254 };
68255@@ -1018,6 +1031,10 @@ static int compat_filldir64(struct dir_context *ctx, const char *name,
68256 buf->error = -EINVAL; /* only used if we fail.. */
68257 if (reclen > buf->count)
68258 return -EINVAL;
68259+
68260+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
68261+ return 0;
68262+
68263 dirent = buf->previous;
68264
68265 if (dirent) {
68266@@ -1067,6 +1084,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
68267 if (!f.file)
68268 return -EBADF;
68269
68270+ buf.file = f.file;
68271 error = iterate_dir(f.file, &buf.ctx);
68272 if (error >= 0)
68273 error = buf.error;
68274diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
68275index 4d24d17..4f8c09e 100644
68276--- a/fs/compat_binfmt_elf.c
68277+++ b/fs/compat_binfmt_elf.c
68278@@ -30,11 +30,13 @@
68279 #undef elf_phdr
68280 #undef elf_shdr
68281 #undef elf_note
68282+#undef elf_dyn
68283 #undef elf_addr_t
68284 #define elfhdr elf32_hdr
68285 #define elf_phdr elf32_phdr
68286 #define elf_shdr elf32_shdr
68287 #define elf_note elf32_note
68288+#define elf_dyn Elf32_Dyn
68289 #define elf_addr_t Elf32_Addr
68290
68291 /*
68292diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
68293index afec645..9c65620 100644
68294--- a/fs/compat_ioctl.c
68295+++ b/fs/compat_ioctl.c
68296@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
68297 return -EFAULT;
68298 if (__get_user(udata, &ss32->iomem_base))
68299 return -EFAULT;
68300- ss.iomem_base = compat_ptr(udata);
68301+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
68302 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
68303 __get_user(ss.port_high, &ss32->port_high))
68304 return -EFAULT;
68305@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
68306 for (i = 0; i < nmsgs; i++) {
68307 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
68308 return -EFAULT;
68309- if (get_user(datap, &umsgs[i].buf) ||
68310- put_user(compat_ptr(datap), &tmsgs[i].buf))
68311+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
68312+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
68313 return -EFAULT;
68314 }
68315 return sys_ioctl(fd, cmd, (unsigned long)tdata);
68316@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
68317 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
68318 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
68319 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
68320- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
68321+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
68322 return -EFAULT;
68323
68324 return ioctl_preallocate(file, p);
68325@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
68326 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
68327 {
68328 unsigned int a, b;
68329- a = *(unsigned int *)p;
68330- b = *(unsigned int *)q;
68331+ a = *(const unsigned int *)p;
68332+ b = *(const unsigned int *)q;
68333 if (a > b)
68334 return 1;
68335 if (a < b)
68336diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
68337index cf0db00..c7f70e8 100644
68338--- a/fs/configfs/dir.c
68339+++ b/fs/configfs/dir.c
68340@@ -1540,7 +1540,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
68341 }
68342 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
68343 struct configfs_dirent *next;
68344- const char *name;
68345+ const unsigned char * name;
68346+ char d_name[sizeof(next->s_dentry->d_iname)];
68347 int len;
68348 struct inode *inode = NULL;
68349
68350@@ -1549,7 +1550,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
68351 continue;
68352
68353 name = configfs_get_name(next);
68354- len = strlen(name);
68355+ if (next->s_dentry && name == next->s_dentry->d_iname) {
68356+ len = next->s_dentry->d_name.len;
68357+ memcpy(d_name, name, len);
68358+ name = d_name;
68359+ } else
68360+ len = strlen(name);
68361
68362 /*
68363 * We'll have a dentry and an inode for
68364diff --git a/fs/configfs/item.c b/fs/configfs/item.c
68365index e65f9ff..3ed264d 100644
68366--- a/fs/configfs/item.c
68367+++ b/fs/configfs/item.c
68368@@ -116,7 +116,7 @@ void config_item_init_type_name(struct config_item *item,
68369 const char *name,
68370 struct config_item_type *type)
68371 {
68372- config_item_set_name(item, name);
68373+ config_item_set_name(item, "%s", name);
68374 item->ci_type = type;
68375 config_item_init(item);
68376 }
68377@@ -125,7 +125,7 @@ EXPORT_SYMBOL(config_item_init_type_name);
68378 void config_group_init_type_name(struct config_group *group, const char *name,
68379 struct config_item_type *type)
68380 {
68381- config_item_set_name(&group->cg_item, name);
68382+ config_item_set_name(&group->cg_item, "%s", name);
68383 group->cg_item.ci_type = type;
68384 config_group_init(group);
68385 }
68386diff --git a/fs/coredump.c b/fs/coredump.c
68387index bbbe139..b76fae5 100644
68388--- a/fs/coredump.c
68389+++ b/fs/coredump.c
68390@@ -450,8 +450,8 @@ static void wait_for_dump_helpers(struct file *file)
68391 struct pipe_inode_info *pipe = file->private_data;
68392
68393 pipe_lock(pipe);
68394- pipe->readers++;
68395- pipe->writers--;
68396+ atomic_inc(&pipe->readers);
68397+ atomic_dec(&pipe->writers);
68398 wake_up_interruptible_sync(&pipe->wait);
68399 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
68400 pipe_unlock(pipe);
68401@@ -460,11 +460,11 @@ static void wait_for_dump_helpers(struct file *file)
68402 * We actually want wait_event_freezable() but then we need
68403 * to clear TIF_SIGPENDING and improve dump_interrupted().
68404 */
68405- wait_event_interruptible(pipe->wait, pipe->readers == 1);
68406+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
68407
68408 pipe_lock(pipe);
68409- pipe->readers--;
68410- pipe->writers++;
68411+ atomic_dec(&pipe->readers);
68412+ atomic_inc(&pipe->writers);
68413 pipe_unlock(pipe);
68414 }
68415
68416@@ -511,7 +511,9 @@ void do_coredump(const siginfo_t *siginfo)
68417 struct files_struct *displaced;
68418 bool need_nonrelative = false;
68419 bool core_dumped = false;
68420- static atomic_t core_dump_count = ATOMIC_INIT(0);
68421+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
68422+ long signr = siginfo->si_signo;
68423+ int dumpable;
68424 struct coredump_params cprm = {
68425 .siginfo = siginfo,
68426 .regs = signal_pt_regs(),
68427@@ -524,12 +526,17 @@ void do_coredump(const siginfo_t *siginfo)
68428 .mm_flags = mm->flags,
68429 };
68430
68431- audit_core_dumps(siginfo->si_signo);
68432+ audit_core_dumps(signr);
68433+
68434+ dumpable = __get_dumpable(cprm.mm_flags);
68435+
68436+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
68437+ gr_handle_brute_attach(dumpable);
68438
68439 binfmt = mm->binfmt;
68440 if (!binfmt || !binfmt->core_dump)
68441 goto fail;
68442- if (!__get_dumpable(cprm.mm_flags))
68443+ if (!dumpable)
68444 goto fail;
68445
68446 cred = prepare_creds();
68447@@ -548,7 +555,7 @@ void do_coredump(const siginfo_t *siginfo)
68448 need_nonrelative = true;
68449 }
68450
68451- retval = coredump_wait(siginfo->si_signo, &core_state);
68452+ retval = coredump_wait(signr, &core_state);
68453 if (retval < 0)
68454 goto fail_creds;
68455
68456@@ -591,7 +598,7 @@ void do_coredump(const siginfo_t *siginfo)
68457 }
68458 cprm.limit = RLIM_INFINITY;
68459
68460- dump_count = atomic_inc_return(&core_dump_count);
68461+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
68462 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
68463 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
68464 task_tgid_vnr(current), current->comm);
68465@@ -623,6 +630,8 @@ void do_coredump(const siginfo_t *siginfo)
68466 } else {
68467 struct inode *inode;
68468
68469+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
68470+
68471 if (cprm.limit < binfmt->min_coredump)
68472 goto fail_unlock;
68473
68474@@ -681,7 +690,7 @@ close_fail:
68475 filp_close(cprm.file, NULL);
68476 fail_dropcount:
68477 if (ispipe)
68478- atomic_dec(&core_dump_count);
68479+ atomic_dec_unchecked(&core_dump_count);
68480 fail_unlock:
68481 kfree(cn.corename);
68482 coredump_finish(mm, core_dumped);
68483@@ -702,6 +711,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
68484 struct file *file = cprm->file;
68485 loff_t pos = file->f_pos;
68486 ssize_t n;
68487+
68488+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
68489 if (cprm->written + nr > cprm->limit)
68490 return 0;
68491 while (nr) {
68492diff --git a/fs/dcache.c b/fs/dcache.c
68493index b05c557..4bcc589 100644
68494--- a/fs/dcache.c
68495+++ b/fs/dcache.c
68496@@ -511,7 +511,7 @@ static void __dentry_kill(struct dentry *dentry)
68497 * dentry_iput drops the locks, at which point nobody (except
68498 * transient RCU lookups) can reach this dentry.
68499 */
68500- BUG_ON(dentry->d_lockref.count > 0);
68501+ BUG_ON(__lockref_read(&dentry->d_lockref) > 0);
68502 this_cpu_dec(nr_dentry);
68503 if (dentry->d_op && dentry->d_op->d_release)
68504 dentry->d_op->d_release(dentry);
68505@@ -564,7 +564,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
68506 struct dentry *parent = dentry->d_parent;
68507 if (IS_ROOT(dentry))
68508 return NULL;
68509- if (unlikely(dentry->d_lockref.count < 0))
68510+ if (unlikely(__lockref_read(&dentry->d_lockref) < 0))
68511 return NULL;
68512 if (likely(spin_trylock(&parent->d_lock)))
68513 return parent;
68514@@ -626,8 +626,8 @@ static inline bool fast_dput(struct dentry *dentry)
68515 */
68516 if (unlikely(ret < 0)) {
68517 spin_lock(&dentry->d_lock);
68518- if (dentry->d_lockref.count > 1) {
68519- dentry->d_lockref.count--;
68520+ if (__lockref_read(&dentry->d_lockref) > 1) {
68521+ __lockref_dec(&dentry->d_lockref);
68522 spin_unlock(&dentry->d_lock);
68523 return 1;
68524 }
68525@@ -682,7 +682,7 @@ static inline bool fast_dput(struct dentry *dentry)
68526 * else could have killed it and marked it dead. Either way, we
68527 * don't need to do anything else.
68528 */
68529- if (dentry->d_lockref.count) {
68530+ if (__lockref_read(&dentry->d_lockref)) {
68531 spin_unlock(&dentry->d_lock);
68532 return 1;
68533 }
68534@@ -692,7 +692,7 @@ static inline bool fast_dput(struct dentry *dentry)
68535 * lock, and we just tested that it was zero, so we can just
68536 * set it to 1.
68537 */
68538- dentry->d_lockref.count = 1;
68539+ __lockref_set(&dentry->d_lockref, 1);
68540 return 0;
68541 }
68542
68543@@ -751,7 +751,7 @@ repeat:
68544 dentry->d_flags |= DCACHE_REFERENCED;
68545 dentry_lru_add(dentry);
68546
68547- dentry->d_lockref.count--;
68548+ __lockref_dec(&dentry->d_lockref);
68549 spin_unlock(&dentry->d_lock);
68550 return;
68551
68552@@ -766,7 +766,7 @@ EXPORT_SYMBOL(dput);
68553 /* This must be called with d_lock held */
68554 static inline void __dget_dlock(struct dentry *dentry)
68555 {
68556- dentry->d_lockref.count++;
68557+ __lockref_inc(&dentry->d_lockref);
68558 }
68559
68560 static inline void __dget(struct dentry *dentry)
68561@@ -807,8 +807,8 @@ repeat:
68562 goto repeat;
68563 }
68564 rcu_read_unlock();
68565- BUG_ON(!ret->d_lockref.count);
68566- ret->d_lockref.count++;
68567+ BUG_ON(!__lockref_read(&ret->d_lockref));
68568+ __lockref_inc(&ret->d_lockref);
68569 spin_unlock(&ret->d_lock);
68570 return ret;
68571 }
68572@@ -886,9 +886,9 @@ restart:
68573 spin_lock(&inode->i_lock);
68574 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
68575 spin_lock(&dentry->d_lock);
68576- if (!dentry->d_lockref.count) {
68577+ if (!__lockref_read(&dentry->d_lockref)) {
68578 struct dentry *parent = lock_parent(dentry);
68579- if (likely(!dentry->d_lockref.count)) {
68580+ if (likely(!__lockref_read(&dentry->d_lockref))) {
68581 __dentry_kill(dentry);
68582 dput(parent);
68583 goto restart;
68584@@ -923,7 +923,7 @@ static void shrink_dentry_list(struct list_head *list)
68585 * We found an inuse dentry which was not removed from
68586 * the LRU because of laziness during lookup. Do not free it.
68587 */
68588- if (dentry->d_lockref.count > 0) {
68589+ if (__lockref_read(&dentry->d_lockref) > 0) {
68590 spin_unlock(&dentry->d_lock);
68591 if (parent)
68592 spin_unlock(&parent->d_lock);
68593@@ -961,8 +961,8 @@ static void shrink_dentry_list(struct list_head *list)
68594 dentry = parent;
68595 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
68596 parent = lock_parent(dentry);
68597- if (dentry->d_lockref.count != 1) {
68598- dentry->d_lockref.count--;
68599+ if (__lockref_read(&dentry->d_lockref) != 1) {
68600+ __lockref_inc(&dentry->d_lockref);
68601 spin_unlock(&dentry->d_lock);
68602 if (parent)
68603 spin_unlock(&parent->d_lock);
68604@@ -1002,7 +1002,7 @@ static enum lru_status dentry_lru_isolate(struct list_head *item,
68605 * counts, just remove them from the LRU. Otherwise give them
68606 * another pass through the LRU.
68607 */
68608- if (dentry->d_lockref.count) {
68609+ if (__lockref_read(&dentry->d_lockref)) {
68610 d_lru_isolate(lru, dentry);
68611 spin_unlock(&dentry->d_lock);
68612 return LRU_REMOVED;
68613@@ -1336,7 +1336,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
68614 } else {
68615 if (dentry->d_flags & DCACHE_LRU_LIST)
68616 d_lru_del(dentry);
68617- if (!dentry->d_lockref.count) {
68618+ if (!__lockref_read(&dentry->d_lockref)) {
68619 d_shrink_add(dentry, &data->dispose);
68620 data->found++;
68621 }
68622@@ -1384,7 +1384,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
68623 return D_WALK_CONTINUE;
68624
68625 /* root with refcount 1 is fine */
68626- if (dentry == _data && dentry->d_lockref.count == 1)
68627+ if (dentry == _data && __lockref_read(&dentry->d_lockref) == 1)
68628 return D_WALK_CONTINUE;
68629
68630 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
68631@@ -1393,7 +1393,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
68632 dentry->d_inode ?
68633 dentry->d_inode->i_ino : 0UL,
68634 dentry,
68635- dentry->d_lockref.count,
68636+ __lockref_read(&dentry->d_lockref),
68637 dentry->d_sb->s_type->name,
68638 dentry->d_sb->s_id);
68639 WARN_ON(1);
68640@@ -1534,7 +1534,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
68641 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
68642 if (name->len > DNAME_INLINE_LEN-1) {
68643 size_t size = offsetof(struct external_name, name[1]);
68644- struct external_name *p = kmalloc(size + name->len, GFP_KERNEL);
68645+ struct external_name *p = kmalloc(round_up(size + name->len, sizeof(unsigned long)), GFP_KERNEL);
68646 if (!p) {
68647 kmem_cache_free(dentry_cache, dentry);
68648 return NULL;
68649@@ -1557,7 +1557,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
68650 smp_wmb();
68651 dentry->d_name.name = dname;
68652
68653- dentry->d_lockref.count = 1;
68654+ __lockref_set(&dentry->d_lockref, 1);
68655 dentry->d_flags = 0;
68656 spin_lock_init(&dentry->d_lock);
68657 seqcount_init(&dentry->d_seq);
68658@@ -1566,6 +1566,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
68659 dentry->d_sb = sb;
68660 dentry->d_op = NULL;
68661 dentry->d_fsdata = NULL;
68662+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
68663+ atomic_set(&dentry->chroot_refcnt, 0);
68664+#endif
68665 INIT_HLIST_BL_NODE(&dentry->d_hash);
68666 INIT_LIST_HEAD(&dentry->d_lru);
68667 INIT_LIST_HEAD(&dentry->d_subdirs);
68668@@ -2290,7 +2293,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
68669 goto next;
68670 }
68671
68672- dentry->d_lockref.count++;
68673+ __lockref_inc(&dentry->d_lockref);
68674 found = dentry;
68675 spin_unlock(&dentry->d_lock);
68676 break;
68677@@ -2358,7 +2361,7 @@ again:
68678 spin_lock(&dentry->d_lock);
68679 inode = dentry->d_inode;
68680 isdir = S_ISDIR(inode->i_mode);
68681- if (dentry->d_lockref.count == 1) {
68682+ if (__lockref_read(&dentry->d_lockref) == 1) {
68683 if (!spin_trylock(&inode->i_lock)) {
68684 spin_unlock(&dentry->d_lock);
68685 cpu_relax();
68686@@ -3300,7 +3303,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
68687
68688 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
68689 dentry->d_flags |= DCACHE_GENOCIDE;
68690- dentry->d_lockref.count--;
68691+ __lockref_dec(&dentry->d_lockref);
68692 }
68693 }
68694 return D_WALK_CONTINUE;
68695@@ -3416,7 +3419,8 @@ void __init vfs_caches_init(unsigned long mempages)
68696 mempages -= reserve;
68697
68698 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
68699- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
68700+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
68701+ SLAB_NO_SANITIZE, NULL);
68702
68703 dcache_init();
68704 inode_init();
68705diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
68706index 96400ab..906103d 100644
68707--- a/fs/debugfs/inode.c
68708+++ b/fs/debugfs/inode.c
68709@@ -386,6 +386,10 @@ struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
68710 }
68711 EXPORT_SYMBOL_GPL(debugfs_create_file_size);
68712
68713+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
68714+extern int grsec_enable_sysfs_restrict;
68715+#endif
68716+
68717 /**
68718 * debugfs_create_dir - create a directory in the debugfs filesystem
68719 * @name: a pointer to a string containing the name of the directory to
68720@@ -404,6 +408,10 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_size);
68721 * If debugfs is not enabled in the kernel, the value -%ENODEV will be
68722 * returned.
68723 */
68724+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
68725+extern int grsec_enable_sysfs_restrict;
68726+#endif
68727+
68728 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
68729 {
68730 struct dentry *dentry = start_creating(name, parent);
68731@@ -416,7 +424,12 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
68732 if (unlikely(!inode))
68733 return failed_creating(dentry);
68734
68735- inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
68736+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
68737+ if (grsec_enable_sysfs_restrict)
68738+ inode->i_mode = S_IFDIR | S_IRWXU;
68739+ else
68740+#endif
68741+ inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
68742 inode->i_op = &simple_dir_inode_operations;
68743 inode->i_fop = &simple_dir_operations;
68744
68745diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
68746index b08b518..d6acffa 100644
68747--- a/fs/ecryptfs/inode.c
68748+++ b/fs/ecryptfs/inode.c
68749@@ -663,7 +663,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
68750 old_fs = get_fs();
68751 set_fs(get_ds());
68752 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
68753- (char __user *)lower_buf,
68754+ (char __force_user *)lower_buf,
68755 PATH_MAX);
68756 set_fs(old_fs);
68757 if (rc < 0)
68758diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
68759index e4141f2..d8263e8 100644
68760--- a/fs/ecryptfs/miscdev.c
68761+++ b/fs/ecryptfs/miscdev.c
68762@@ -304,7 +304,7 @@ check_list:
68763 goto out_unlock_msg_ctx;
68764 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
68765 if (msg_ctx->msg) {
68766- if (copy_to_user(&buf[i], packet_length, packet_length_size))
68767+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
68768 goto out_unlock_msg_ctx;
68769 i += packet_length_size;
68770 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
68771diff --git a/fs/exec.c b/fs/exec.c
68772index 1202445..620c98e 100644
68773--- a/fs/exec.c
68774+++ b/fs/exec.c
68775@@ -56,8 +56,20 @@
68776 #include <linux/pipe_fs_i.h>
68777 #include <linux/oom.h>
68778 #include <linux/compat.h>
68779+#include <linux/random.h>
68780+#include <linux/seq_file.h>
68781+#include <linux/coredump.h>
68782+#include <linux/mman.h>
68783+
68784+#ifdef CONFIG_PAX_REFCOUNT
68785+#include <linux/kallsyms.h>
68786+#include <linux/kdebug.h>
68787+#endif
68788+
68789+#include <trace/events/fs.h>
68790
68791 #include <asm/uaccess.h>
68792+#include <asm/sections.h>
68793 #include <asm/mmu_context.h>
68794 #include <asm/tlb.h>
68795
68796@@ -66,19 +78,34 @@
68797
68798 #include <trace/events/sched.h>
68799
68800+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
68801+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
68802+{
68803+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
68804+}
68805+#endif
68806+
68807+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
68808+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
68809+EXPORT_SYMBOL(pax_set_initial_flags_func);
68810+#endif
68811+
68812 int suid_dumpable = 0;
68813
68814 static LIST_HEAD(formats);
68815 static DEFINE_RWLOCK(binfmt_lock);
68816
68817+extern int gr_process_kernel_exec_ban(void);
68818+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
68819+
68820 void __register_binfmt(struct linux_binfmt * fmt, int insert)
68821 {
68822 BUG_ON(!fmt);
68823 if (WARN_ON(!fmt->load_binary))
68824 return;
68825 write_lock(&binfmt_lock);
68826- insert ? list_add(&fmt->lh, &formats) :
68827- list_add_tail(&fmt->lh, &formats);
68828+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
68829+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
68830 write_unlock(&binfmt_lock);
68831 }
68832
68833@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
68834 void unregister_binfmt(struct linux_binfmt * fmt)
68835 {
68836 write_lock(&binfmt_lock);
68837- list_del(&fmt->lh);
68838+ pax_list_del((struct list_head *)&fmt->lh);
68839 write_unlock(&binfmt_lock);
68840 }
68841
68842@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
68843 int write)
68844 {
68845 struct page *page;
68846- int ret;
68847
68848-#ifdef CONFIG_STACK_GROWSUP
68849- if (write) {
68850- ret = expand_downwards(bprm->vma, pos);
68851- if (ret < 0)
68852- return NULL;
68853- }
68854-#endif
68855- ret = get_user_pages(current, bprm->mm, pos,
68856- 1, write, 1, &page, NULL);
68857- if (ret <= 0)
68858+ if (0 > expand_downwards(bprm->vma, pos))
68859+ return NULL;
68860+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
68861 return NULL;
68862
68863 if (write) {
68864@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
68865 if (size <= ARG_MAX)
68866 return page;
68867
68868+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68869+ // only allow 512KB for argv+env on suid/sgid binaries
68870+ // to prevent easy ASLR exhaustion
68871+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
68872+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
68873+ (size > (512 * 1024))) {
68874+ put_page(page);
68875+ return NULL;
68876+ }
68877+#endif
68878+
68879 /*
68880 * Limit to 1/4-th the stack size for the argv+env strings.
68881 * This ensures that:
68882@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
68883 vma->vm_end = STACK_TOP_MAX;
68884 vma->vm_start = vma->vm_end - PAGE_SIZE;
68885 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
68886+
68887+#ifdef CONFIG_PAX_SEGMEXEC
68888+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68889+#endif
68890+
68891 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68892 INIT_LIST_HEAD(&vma->anon_vma_chain);
68893
68894@@ -280,6 +315,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
68895 arch_bprm_mm_init(mm, vma);
68896 up_write(&mm->mmap_sem);
68897 bprm->p = vma->vm_end - sizeof(void *);
68898+
68899+#ifdef CONFIG_PAX_RANDUSTACK
68900+ if (randomize_va_space)
68901+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
68902+#endif
68903+
68904 return 0;
68905 err:
68906 up_write(&mm->mmap_sem);
68907@@ -396,7 +437,7 @@ struct user_arg_ptr {
68908 } ptr;
68909 };
68910
68911-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
68912+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
68913 {
68914 const char __user *native;
68915
68916@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
68917 compat_uptr_t compat;
68918
68919 if (get_user(compat, argv.ptr.compat + nr))
68920- return ERR_PTR(-EFAULT);
68921+ return (const char __force_user *)ERR_PTR(-EFAULT);
68922
68923 return compat_ptr(compat);
68924 }
68925 #endif
68926
68927 if (get_user(native, argv.ptr.native + nr))
68928- return ERR_PTR(-EFAULT);
68929+ return (const char __force_user *)ERR_PTR(-EFAULT);
68930
68931 return native;
68932 }
68933@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
68934 if (!p)
68935 break;
68936
68937- if (IS_ERR(p))
68938+ if (IS_ERR((const char __force_kernel *)p))
68939 return -EFAULT;
68940
68941 if (i >= max)
68942@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
68943
68944 ret = -EFAULT;
68945 str = get_user_arg_ptr(argv, argc);
68946- if (IS_ERR(str))
68947+ if (IS_ERR((const char __force_kernel *)str))
68948 goto out;
68949
68950 len = strnlen_user(str, MAX_ARG_STRLEN);
68951@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
68952 int r;
68953 mm_segment_t oldfs = get_fs();
68954 struct user_arg_ptr argv = {
68955- .ptr.native = (const char __user *const __user *)__argv,
68956+ .ptr.native = (const char __user * const __force_user *)__argv,
68957 };
68958
68959 set_fs(KERNEL_DS);
68960@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
68961 unsigned long new_end = old_end - shift;
68962 struct mmu_gather tlb;
68963
68964- BUG_ON(new_start > new_end);
68965+ if (new_start >= new_end || new_start < mmap_min_addr)
68966+ return -ENOMEM;
68967
68968 /*
68969 * ensure there are no vmas between where we want to go
68970@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
68971 if (vma != find_vma(mm, new_start))
68972 return -EFAULT;
68973
68974+#ifdef CONFIG_PAX_SEGMEXEC
68975+ BUG_ON(pax_find_mirror_vma(vma));
68976+#endif
68977+
68978 /*
68979 * cover the whole range: [new_start, old_end)
68980 */
68981@@ -675,10 +721,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
68982 stack_top = arch_align_stack(stack_top);
68983 stack_top = PAGE_ALIGN(stack_top);
68984
68985- if (unlikely(stack_top < mmap_min_addr) ||
68986- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
68987- return -ENOMEM;
68988-
68989 stack_shift = vma->vm_end - stack_top;
68990
68991 bprm->p -= stack_shift;
68992@@ -690,8 +732,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
68993 bprm->exec -= stack_shift;
68994
68995 down_write(&mm->mmap_sem);
68996+
68997+ /* Move stack pages down in memory. */
68998+ if (stack_shift) {
68999+ ret = shift_arg_pages(vma, stack_shift);
69000+ if (ret)
69001+ goto out_unlock;
69002+ }
69003+
69004 vm_flags = VM_STACK_FLAGS;
69005
69006+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69007+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69008+ vm_flags &= ~VM_EXEC;
69009+
69010+#ifdef CONFIG_PAX_MPROTECT
69011+ if (mm->pax_flags & MF_PAX_MPROTECT)
69012+ vm_flags &= ~VM_MAYEXEC;
69013+#endif
69014+
69015+ }
69016+#endif
69017+
69018 /*
69019 * Adjust stack execute permissions; explicitly enable for
69020 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
69021@@ -710,13 +772,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
69022 goto out_unlock;
69023 BUG_ON(prev != vma);
69024
69025- /* Move stack pages down in memory. */
69026- if (stack_shift) {
69027- ret = shift_arg_pages(vma, stack_shift);
69028- if (ret)
69029- goto out_unlock;
69030- }
69031-
69032 /* mprotect_fixup is overkill to remove the temporary stack flags */
69033 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
69034
69035@@ -740,6 +795,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
69036 #endif
69037 current->mm->start_stack = bprm->p;
69038 ret = expand_stack(vma, stack_base);
69039+
69040+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
69041+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
69042+ unsigned long size;
69043+ vm_flags_t vm_flags;
69044+
69045+ size = STACK_TOP - vma->vm_end;
69046+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
69047+
69048+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
69049+
69050+#ifdef CONFIG_X86
69051+ if (!ret) {
69052+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
69053+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
69054+ }
69055+#endif
69056+
69057+ }
69058+#endif
69059+
69060 if (ret)
69061 ret = -EFAULT;
69062
69063@@ -784,8 +860,10 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
69064 if (err)
69065 goto exit;
69066
69067- if (name->name[0] != '\0')
69068+ if (name->name[0] != '\0') {
69069 fsnotify_open(file);
69070+ trace_open_exec(name->name);
69071+ }
69072
69073 out:
69074 return file;
69075@@ -818,7 +896,7 @@ int kernel_read(struct file *file, loff_t offset,
69076 old_fs = get_fs();
69077 set_fs(get_ds());
69078 /* The cast to a user pointer is valid due to the set_fs() */
69079- result = vfs_read(file, (void __user *)addr, count, &pos);
69080+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
69081 set_fs(old_fs);
69082 return result;
69083 }
69084@@ -863,6 +941,7 @@ static int exec_mmap(struct mm_struct *mm)
69085 tsk->mm = mm;
69086 tsk->active_mm = mm;
69087 activate_mm(active_mm, mm);
69088+ populate_stack();
69089 tsk->mm->vmacache_seqnum = 0;
69090 vmacache_flush(tsk);
69091 task_unlock(tsk);
69092@@ -929,10 +1008,14 @@ static int de_thread(struct task_struct *tsk)
69093 if (!thread_group_leader(tsk)) {
69094 struct task_struct *leader = tsk->group_leader;
69095
69096- sig->notify_count = -1; /* for exit_notify() */
69097 for (;;) {
69098 threadgroup_change_begin(tsk);
69099 write_lock_irq(&tasklist_lock);
69100+ /*
69101+ * Do this under tasklist_lock to ensure that
69102+ * exit_notify() can't miss ->group_exit_task
69103+ */
69104+ sig->notify_count = -1;
69105 if (likely(leader->exit_state))
69106 break;
69107 __set_current_state(TASK_KILLABLE);
69108@@ -1261,7 +1344,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
69109 }
69110 rcu_read_unlock();
69111
69112- if (p->fs->users > n_fs)
69113+ if (atomic_read(&p->fs->users) > n_fs)
69114 bprm->unsafe |= LSM_UNSAFE_SHARE;
69115 else
69116 p->fs->in_exec = 1;
69117@@ -1462,6 +1545,31 @@ static int exec_binprm(struct linux_binprm *bprm)
69118 return ret;
69119 }
69120
69121+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69122+static DEFINE_PER_CPU(u64, exec_counter);
69123+static int __init init_exec_counters(void)
69124+{
69125+ unsigned int cpu;
69126+
69127+ for_each_possible_cpu(cpu) {
69128+ per_cpu(exec_counter, cpu) = (u64)cpu;
69129+ }
69130+
69131+ return 0;
69132+}
69133+early_initcall(init_exec_counters);
69134+static inline void increment_exec_counter(void)
69135+{
69136+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
69137+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
69138+}
69139+#else
69140+static inline void increment_exec_counter(void) {}
69141+#endif
69142+
69143+extern void gr_handle_exec_args(struct linux_binprm *bprm,
69144+ struct user_arg_ptr argv);
69145+
69146 /*
69147 * sys_execve() executes a new program.
69148 */
69149@@ -1470,6 +1578,11 @@ static int do_execveat_common(int fd, struct filename *filename,
69150 struct user_arg_ptr envp,
69151 int flags)
69152 {
69153+#ifdef CONFIG_GRKERNSEC
69154+ struct file *old_exec_file;
69155+ struct acl_subject_label *old_acl;
69156+ struct rlimit old_rlim[RLIM_NLIMITS];
69157+#endif
69158 char *pathbuf = NULL;
69159 struct linux_binprm *bprm;
69160 struct file *file;
69161@@ -1479,6 +1592,8 @@ static int do_execveat_common(int fd, struct filename *filename,
69162 if (IS_ERR(filename))
69163 return PTR_ERR(filename);
69164
69165+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
69166+
69167 /*
69168 * We move the actual failure in case of RLIMIT_NPROC excess from
69169 * set*uid() to execve() because too many poorly written programs
69170@@ -1516,6 +1631,11 @@ static int do_execveat_common(int fd, struct filename *filename,
69171 if (IS_ERR(file))
69172 goto out_unmark;
69173
69174+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
69175+ retval = -EPERM;
69176+ goto out_unmark;
69177+ }
69178+
69179 sched_exec();
69180
69181 bprm->file = file;
69182@@ -1542,6 +1662,11 @@ static int do_execveat_common(int fd, struct filename *filename,
69183 }
69184 bprm->interp = bprm->filename;
69185
69186+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
69187+ retval = -EACCES;
69188+ goto out_unmark;
69189+ }
69190+
69191 retval = bprm_mm_init(bprm);
69192 if (retval)
69193 goto out_unmark;
69194@@ -1558,24 +1683,70 @@ static int do_execveat_common(int fd, struct filename *filename,
69195 if (retval < 0)
69196 goto out;
69197
69198+#ifdef CONFIG_GRKERNSEC
69199+ old_acl = current->acl;
69200+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
69201+ old_exec_file = current->exec_file;
69202+ get_file(file);
69203+ current->exec_file = file;
69204+#endif
69205+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69206+ /* limit suid stack to 8MB
69207+ * we saved the old limits above and will restore them if this exec fails
69208+ */
69209+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
69210+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
69211+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
69212+#endif
69213+
69214+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
69215+ retval = -EPERM;
69216+ goto out_fail;
69217+ }
69218+
69219+ if (!gr_tpe_allow(file)) {
69220+ retval = -EACCES;
69221+ goto out_fail;
69222+ }
69223+
69224+ if (gr_check_crash_exec(file)) {
69225+ retval = -EACCES;
69226+ goto out_fail;
69227+ }
69228+
69229+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
69230+ bprm->unsafe);
69231+ if (retval < 0)
69232+ goto out_fail;
69233+
69234 retval = copy_strings_kernel(1, &bprm->filename, bprm);
69235 if (retval < 0)
69236- goto out;
69237+ goto out_fail;
69238
69239 bprm->exec = bprm->p;
69240 retval = copy_strings(bprm->envc, envp, bprm);
69241 if (retval < 0)
69242- goto out;
69243+ goto out_fail;
69244
69245 retval = copy_strings(bprm->argc, argv, bprm);
69246 if (retval < 0)
69247- goto out;
69248+ goto out_fail;
69249+
69250+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
69251+
69252+ gr_handle_exec_args(bprm, argv);
69253
69254 retval = exec_binprm(bprm);
69255 if (retval < 0)
69256- goto out;
69257+ goto out_fail;
69258+#ifdef CONFIG_GRKERNSEC
69259+ if (old_exec_file)
69260+ fput(old_exec_file);
69261+#endif
69262
69263 /* execve succeeded */
69264+
69265+ increment_exec_counter();
69266 current->fs->in_exec = 0;
69267 current->in_execve = 0;
69268 acct_update_integrals(current);
69269@@ -1587,6 +1758,14 @@ static int do_execveat_common(int fd, struct filename *filename,
69270 put_files_struct(displaced);
69271 return retval;
69272
69273+out_fail:
69274+#ifdef CONFIG_GRKERNSEC
69275+ current->acl = old_acl;
69276+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
69277+ fput(current->exec_file);
69278+ current->exec_file = old_exec_file;
69279+#endif
69280+
69281 out:
69282 if (bprm->mm) {
69283 acct_arg_size(bprm, 0);
69284@@ -1733,3 +1912,312 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
69285 argv, envp, flags);
69286 }
69287 #endif
69288+
69289+int pax_check_flags(unsigned long *flags)
69290+{
69291+ int retval = 0;
69292+
69293+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
69294+ if (*flags & MF_PAX_SEGMEXEC)
69295+ {
69296+ *flags &= ~MF_PAX_SEGMEXEC;
69297+ retval = -EINVAL;
69298+ }
69299+#endif
69300+
69301+ if ((*flags & MF_PAX_PAGEEXEC)
69302+
69303+#ifdef CONFIG_PAX_PAGEEXEC
69304+ && (*flags & MF_PAX_SEGMEXEC)
69305+#endif
69306+
69307+ )
69308+ {
69309+ *flags &= ~MF_PAX_PAGEEXEC;
69310+ retval = -EINVAL;
69311+ }
69312+
69313+ if ((*flags & MF_PAX_MPROTECT)
69314+
69315+#ifdef CONFIG_PAX_MPROTECT
69316+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
69317+#endif
69318+
69319+ )
69320+ {
69321+ *flags &= ~MF_PAX_MPROTECT;
69322+ retval = -EINVAL;
69323+ }
69324+
69325+ if ((*flags & MF_PAX_EMUTRAMP)
69326+
69327+#ifdef CONFIG_PAX_EMUTRAMP
69328+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
69329+#endif
69330+
69331+ )
69332+ {
69333+ *flags &= ~MF_PAX_EMUTRAMP;
69334+ retval = -EINVAL;
69335+ }
69336+
69337+ return retval;
69338+}
69339+
69340+EXPORT_SYMBOL(pax_check_flags);
69341+
69342+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69343+char *pax_get_path(const struct path *path, char *buf, int buflen)
69344+{
69345+ char *pathname = d_path(path, buf, buflen);
69346+
69347+ if (IS_ERR(pathname))
69348+ goto toolong;
69349+
69350+ pathname = mangle_path(buf, pathname, "\t\n\\");
69351+ if (!pathname)
69352+ goto toolong;
69353+
69354+ *pathname = 0;
69355+ return buf;
69356+
69357+toolong:
69358+ return "<path too long>";
69359+}
69360+EXPORT_SYMBOL(pax_get_path);
69361+
69362+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
69363+{
69364+ struct task_struct *tsk = current;
69365+ struct mm_struct *mm = current->mm;
69366+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
69367+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
69368+ char *path_exec = NULL;
69369+ char *path_fault = NULL;
69370+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
69371+ siginfo_t info = { };
69372+
69373+ if (buffer_exec && buffer_fault) {
69374+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
69375+
69376+ down_read(&mm->mmap_sem);
69377+ vma = mm->mmap;
69378+ while (vma && (!vma_exec || !vma_fault)) {
69379+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
69380+ vma_exec = vma;
69381+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
69382+ vma_fault = vma;
69383+ vma = vma->vm_next;
69384+ }
69385+ if (vma_exec)
69386+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
69387+ if (vma_fault) {
69388+ start = vma_fault->vm_start;
69389+ end = vma_fault->vm_end;
69390+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
69391+ if (vma_fault->vm_file)
69392+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
69393+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
69394+ path_fault = "<heap>";
69395+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
69396+ path_fault = "<stack>";
69397+ else
69398+ path_fault = "<anonymous mapping>";
69399+ }
69400+ up_read(&mm->mmap_sem);
69401+ }
69402+ if (tsk->signal->curr_ip)
69403+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
69404+ else
69405+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
69406+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
69407+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
69408+ free_page((unsigned long)buffer_exec);
69409+ free_page((unsigned long)buffer_fault);
69410+ pax_report_insns(regs, pc, sp);
69411+ info.si_signo = SIGKILL;
69412+ info.si_errno = 0;
69413+ info.si_code = SI_KERNEL;
69414+ info.si_pid = 0;
69415+ info.si_uid = 0;
69416+ do_coredump(&info);
69417+}
69418+#endif
69419+
69420+#ifdef CONFIG_PAX_REFCOUNT
69421+void pax_report_refcount_overflow(struct pt_regs *regs)
69422+{
69423+ if (current->signal->curr_ip)
69424+ printk(KERN_EMERG "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
69425+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
69426+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
69427+ else
69428+ printk(KERN_EMERG "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
69429+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
69430+ print_symbol(KERN_EMERG "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
69431+ preempt_disable();
69432+ show_regs(regs);
69433+ preempt_enable();
69434+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
69435+}
69436+#endif
69437+
69438+#ifdef CONFIG_PAX_USERCOPY
69439+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
69440+static noinline int check_stack_object(const void *obj, unsigned long len)
69441+{
69442+ const void * const stack = task_stack_page(current);
69443+ const void * const stackend = stack + THREAD_SIZE;
69444+
69445+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
69446+ const void *frame = NULL;
69447+ const void *oldframe;
69448+#endif
69449+
69450+ if (obj + len < obj)
69451+ return -1;
69452+
69453+ if (obj + len <= stack || stackend <= obj)
69454+ return 0;
69455+
69456+ if (obj < stack || stackend < obj + len)
69457+ return -1;
69458+
69459+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
69460+ oldframe = __builtin_frame_address(1);
69461+ if (oldframe)
69462+ frame = __builtin_frame_address(2);
69463+ /*
69464+ low ----------------------------------------------> high
69465+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
69466+ ^----------------^
69467+ allow copies only within here
69468+ */
69469+ while (stack <= frame && frame < stackend) {
69470+ /* if obj + len extends past the last frame, this
69471+ check won't pass and the next frame will be 0,
69472+ causing us to bail out and correctly report
69473+ the copy as invalid
69474+ */
69475+ if (obj + len <= frame)
69476+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
69477+ oldframe = frame;
69478+ frame = *(const void * const *)frame;
69479+ }
69480+ return -1;
69481+#else
69482+ return 1;
69483+#endif
69484+}
69485+
69486+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
69487+{
69488+ if (current->signal->curr_ip)
69489+ printk(KERN_EMERG "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
69490+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
69491+ else
69492+ printk(KERN_EMERG "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
69493+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
69494+ dump_stack();
69495+ gr_handle_kernel_exploit();
69496+ do_group_exit(SIGKILL);
69497+}
69498+#endif
69499+
69500+#ifdef CONFIG_PAX_USERCOPY
69501+
69502+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
69503+{
69504+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
69505+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
69506+#ifdef CONFIG_MODULES
69507+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
69508+#else
69509+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
69510+#endif
69511+
69512+#else
69513+ unsigned long textlow = (unsigned long)_stext;
69514+ unsigned long texthigh = (unsigned long)_etext;
69515+
69516+#ifdef CONFIG_X86_64
69517+ /* check against linear mapping as well */
69518+ if (high > (unsigned long)__va(__pa(textlow)) &&
69519+ low < (unsigned long)__va(__pa(texthigh)))
69520+ return true;
69521+#endif
69522+
69523+#endif
69524+
69525+ if (high <= textlow || low >= texthigh)
69526+ return false;
69527+ else
69528+ return true;
69529+}
69530+#endif
69531+
69532+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
69533+{
69534+#ifdef CONFIG_PAX_USERCOPY
69535+ const char *type;
69536+#endif
69537+
69538+#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_X86_64)
69539+ unsigned long stackstart = (unsigned long)task_stack_page(current);
69540+ unsigned long currentsp = (unsigned long)&stackstart;
69541+ if (unlikely((currentsp < stackstart + 512 ||
69542+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
69543+ BUG();
69544+#endif
69545+
69546+#ifndef CONFIG_PAX_USERCOPY_DEBUG
69547+ if (const_size)
69548+ return;
69549+#endif
69550+
69551+#ifdef CONFIG_PAX_USERCOPY
69552+ if (!n)
69553+ return;
69554+
69555+ type = check_heap_object(ptr, n);
69556+ if (!type) {
69557+ int ret = check_stack_object(ptr, n);
69558+ if (ret == 1 || ret == 2)
69559+ return;
69560+ if (ret == 0) {
69561+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
69562+ type = "<kernel text>";
69563+ else
69564+ return;
69565+ } else
69566+ type = "<process stack>";
69567+ }
69568+
69569+ pax_report_usercopy(ptr, n, to_user, type);
69570+#endif
69571+
69572+}
69573+EXPORT_SYMBOL(__check_object_size);
69574+
69575+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
69576+void __used pax_track_stack(void)
69577+{
69578+ unsigned long sp = (unsigned long)&sp;
69579+ if (sp < current_thread_info()->lowest_stack &&
69580+ sp >= (unsigned long)task_stack_page(current) + 2 * sizeof(unsigned long))
69581+ current_thread_info()->lowest_stack = sp;
69582+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
69583+ BUG();
69584+}
69585+EXPORT_SYMBOL(pax_track_stack);
69586+#endif
69587+
69588+#ifdef CONFIG_PAX_SIZE_OVERFLOW
69589+void __nocapture(1, 3, 4) __used report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
69590+{
69591+ printk(KERN_EMERG "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
69592+ dump_stack();
69593+ do_group_exit(SIGKILL);
69594+}
69595+EXPORT_SYMBOL(report_size_overflow);
69596+#endif
69597diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
69598index 9f9992b..8b59411 100644
69599--- a/fs/ext2/balloc.c
69600+++ b/fs/ext2/balloc.c
69601@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
69602
69603 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
69604 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
69605- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
69606+ if (free_blocks < root_blocks + 1 &&
69607 !uid_eq(sbi->s_resuid, current_fsuid()) &&
69608 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
69609- !in_group_p (sbi->s_resgid))) {
69610+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
69611 return 0;
69612 }
69613 return 1;
69614diff --git a/fs/ext2/super.c b/fs/ext2/super.c
69615index d0e746e..82e06f0 100644
69616--- a/fs/ext2/super.c
69617+++ b/fs/ext2/super.c
69618@@ -267,10 +267,8 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
69619 #ifdef CONFIG_EXT2_FS_XATTR
69620 if (test_opt(sb, XATTR_USER))
69621 seq_puts(seq, ",user_xattr");
69622- if (!test_opt(sb, XATTR_USER) &&
69623- (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
69624+ if (!test_opt(sb, XATTR_USER))
69625 seq_puts(seq, ",nouser_xattr");
69626- }
69627 #endif
69628
69629 #ifdef CONFIG_EXT2_FS_POSIX_ACL
69630@@ -856,8 +854,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
69631 if (def_mount_opts & EXT2_DEFM_UID16)
69632 set_opt(sbi->s_mount_opt, NO_UID32);
69633 #ifdef CONFIG_EXT2_FS_XATTR
69634- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
69635- set_opt(sbi->s_mount_opt, XATTR_USER);
69636+ /* always enable user xattrs */
69637+ set_opt(sbi->s_mount_opt, XATTR_USER);
69638 #endif
69639 #ifdef CONFIG_EXT2_FS_POSIX_ACL
69640 if (def_mount_opts & EXT2_DEFM_ACL)
69641diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
69642index 9142614..97484fa 100644
69643--- a/fs/ext2/xattr.c
69644+++ b/fs/ext2/xattr.c
69645@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
69646 struct buffer_head *bh = NULL;
69647 struct ext2_xattr_entry *entry;
69648 char *end;
69649- size_t rest = buffer_size;
69650+ size_t rest = buffer_size, total_size = 0;
69651 int error;
69652
69653 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
69654@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
69655 buffer += size;
69656 }
69657 rest -= size;
69658+ total_size += size;
69659 }
69660 }
69661- error = buffer_size - rest; /* total size */
69662+ error = total_size;
69663
69664 cleanup:
69665 brelse(bh);
69666diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
69667index 158b5d4..2432610 100644
69668--- a/fs/ext3/balloc.c
69669+++ b/fs/ext3/balloc.c
69670@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
69671
69672 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
69673 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
69674- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
69675+ if (free_blocks < root_blocks + 1 &&
69676 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
69677 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
69678- !in_group_p (sbi->s_resgid))) {
69679+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
69680 return 0;
69681 }
69682 return 1;
69683diff --git a/fs/ext3/super.c b/fs/ext3/super.c
69684index d4dbf3c..906a6fb 100644
69685--- a/fs/ext3/super.c
69686+++ b/fs/ext3/super.c
69687@@ -655,10 +655,8 @@ static int ext3_show_options(struct seq_file *seq, struct dentry *root)
69688 #ifdef CONFIG_EXT3_FS_XATTR
69689 if (test_opt(sb, XATTR_USER))
69690 seq_puts(seq, ",user_xattr");
69691- if (!test_opt(sb, XATTR_USER) &&
69692- (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
69693+ if (!test_opt(sb, XATTR_USER))
69694 seq_puts(seq, ",nouser_xattr");
69695- }
69696 #endif
69697 #ifdef CONFIG_EXT3_FS_POSIX_ACL
69698 if (test_opt(sb, POSIX_ACL))
69699@@ -1760,8 +1758,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
69700 if (def_mount_opts & EXT3_DEFM_UID16)
69701 set_opt(sbi->s_mount_opt, NO_UID32);
69702 #ifdef CONFIG_EXT3_FS_XATTR
69703- if (def_mount_opts & EXT3_DEFM_XATTR_USER)
69704- set_opt(sbi->s_mount_opt, XATTR_USER);
69705+ /* always enable user xattrs */
69706+ set_opt(sbi->s_mount_opt, XATTR_USER);
69707 #endif
69708 #ifdef CONFIG_EXT3_FS_POSIX_ACL
69709 if (def_mount_opts & EXT3_DEFM_ACL)
69710diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
69711index c6874be..f8a6ae8 100644
69712--- a/fs/ext3/xattr.c
69713+++ b/fs/ext3/xattr.c
69714@@ -330,7 +330,7 @@ static int
69715 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
69716 char *buffer, size_t buffer_size)
69717 {
69718- size_t rest = buffer_size;
69719+ size_t rest = buffer_size, total_size = 0;
69720
69721 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
69722 const struct xattr_handler *handler =
69723@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
69724 buffer += size;
69725 }
69726 rest -= size;
69727+ total_size += size;
69728 }
69729 }
69730- return buffer_size - rest;
69731+ return total_size;
69732 }
69733
69734 static int
69735diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
69736index 83a6f49..d4e4d03 100644
69737--- a/fs/ext4/balloc.c
69738+++ b/fs/ext4/balloc.c
69739@@ -557,8 +557,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
69740 /* Hm, nope. Are (enough) root reserved clusters available? */
69741 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
69742 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
69743- capable(CAP_SYS_RESOURCE) ||
69744- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
69745+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
69746+ capable_nolog(CAP_SYS_RESOURCE)) {
69747
69748 if (free_clusters >= (nclusters + dirty_clusters +
69749 resv_clusters))
69750diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
69751index f63c3d5..3c1a033 100644
69752--- a/fs/ext4/ext4.h
69753+++ b/fs/ext4/ext4.h
69754@@ -1287,19 +1287,19 @@ struct ext4_sb_info {
69755 unsigned long s_mb_last_start;
69756
69757 /* stats for buddy allocator */
69758- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
69759- atomic_t s_bal_success; /* we found long enough chunks */
69760- atomic_t s_bal_allocated; /* in blocks */
69761- atomic_t s_bal_ex_scanned; /* total extents scanned */
69762- atomic_t s_bal_goals; /* goal hits */
69763- atomic_t s_bal_breaks; /* too long searches */
69764- atomic_t s_bal_2orders; /* 2^order hits */
69765+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
69766+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
69767+ atomic_unchecked_t s_bal_allocated; /* in blocks */
69768+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
69769+ atomic_unchecked_t s_bal_goals; /* goal hits */
69770+ atomic_unchecked_t s_bal_breaks; /* too long searches */
69771+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
69772 spinlock_t s_bal_lock;
69773 unsigned long s_mb_buddies_generated;
69774 unsigned long long s_mb_generation_time;
69775- atomic_t s_mb_lost_chunks;
69776- atomic_t s_mb_preallocated;
69777- atomic_t s_mb_discarded;
69778+ atomic_unchecked_t s_mb_lost_chunks;
69779+ atomic_unchecked_t s_mb_preallocated;
69780+ atomic_unchecked_t s_mb_discarded;
69781 atomic_t s_lock_busy;
69782
69783 /* locality groups */
69784diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
69785index 8d1e602..abf497b 100644
69786--- a/fs/ext4/mballoc.c
69787+++ b/fs/ext4/mballoc.c
69788@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
69789 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
69790
69791 if (EXT4_SB(sb)->s_mb_stats)
69792- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
69793+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
69794
69795 break;
69796 }
69797@@ -2211,7 +2211,7 @@ repeat:
69798 ac->ac_status = AC_STATUS_CONTINUE;
69799 ac->ac_flags |= EXT4_MB_HINT_FIRST;
69800 cr = 3;
69801- atomic_inc(&sbi->s_mb_lost_chunks);
69802+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
69803 goto repeat;
69804 }
69805 }
69806@@ -2716,25 +2716,25 @@ int ext4_mb_release(struct super_block *sb)
69807 if (sbi->s_mb_stats) {
69808 ext4_msg(sb, KERN_INFO,
69809 "mballoc: %u blocks %u reqs (%u success)",
69810- atomic_read(&sbi->s_bal_allocated),
69811- atomic_read(&sbi->s_bal_reqs),
69812- atomic_read(&sbi->s_bal_success));
69813+ atomic_read_unchecked(&sbi->s_bal_allocated),
69814+ atomic_read_unchecked(&sbi->s_bal_reqs),
69815+ atomic_read_unchecked(&sbi->s_bal_success));
69816 ext4_msg(sb, KERN_INFO,
69817 "mballoc: %u extents scanned, %u goal hits, "
69818 "%u 2^N hits, %u breaks, %u lost",
69819- atomic_read(&sbi->s_bal_ex_scanned),
69820- atomic_read(&sbi->s_bal_goals),
69821- atomic_read(&sbi->s_bal_2orders),
69822- atomic_read(&sbi->s_bal_breaks),
69823- atomic_read(&sbi->s_mb_lost_chunks));
69824+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
69825+ atomic_read_unchecked(&sbi->s_bal_goals),
69826+ atomic_read_unchecked(&sbi->s_bal_2orders),
69827+ atomic_read_unchecked(&sbi->s_bal_breaks),
69828+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
69829 ext4_msg(sb, KERN_INFO,
69830 "mballoc: %lu generated and it took %Lu",
69831 sbi->s_mb_buddies_generated,
69832 sbi->s_mb_generation_time);
69833 ext4_msg(sb, KERN_INFO,
69834 "mballoc: %u preallocated, %u discarded",
69835- atomic_read(&sbi->s_mb_preallocated),
69836- atomic_read(&sbi->s_mb_discarded));
69837+ atomic_read_unchecked(&sbi->s_mb_preallocated),
69838+ atomic_read_unchecked(&sbi->s_mb_discarded));
69839 }
69840
69841 free_percpu(sbi->s_locality_groups);
69842@@ -3190,16 +3190,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
69843 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
69844
69845 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
69846- atomic_inc(&sbi->s_bal_reqs);
69847- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
69848+ atomic_inc_unchecked(&sbi->s_bal_reqs);
69849+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
69850 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
69851- atomic_inc(&sbi->s_bal_success);
69852- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
69853+ atomic_inc_unchecked(&sbi->s_bal_success);
69854+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
69855 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
69856 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
69857- atomic_inc(&sbi->s_bal_goals);
69858+ atomic_inc_unchecked(&sbi->s_bal_goals);
69859 if (ac->ac_found > sbi->s_mb_max_to_scan)
69860- atomic_inc(&sbi->s_bal_breaks);
69861+ atomic_inc_unchecked(&sbi->s_bal_breaks);
69862 }
69863
69864 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
69865@@ -3626,7 +3626,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
69866 trace_ext4_mb_new_inode_pa(ac, pa);
69867
69868 ext4_mb_use_inode_pa(ac, pa);
69869- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
69870+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
69871
69872 ei = EXT4_I(ac->ac_inode);
69873 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
69874@@ -3686,7 +3686,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
69875 trace_ext4_mb_new_group_pa(ac, pa);
69876
69877 ext4_mb_use_group_pa(ac, pa);
69878- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
69879+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
69880
69881 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
69882 lg = ac->ac_lg;
69883@@ -3775,7 +3775,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
69884 * from the bitmap and continue.
69885 */
69886 }
69887- atomic_add(free, &sbi->s_mb_discarded);
69888+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
69889
69890 return err;
69891 }
69892@@ -3793,7 +3793,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
69893 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
69894 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
69895 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
69896- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
69897+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
69898 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
69899
69900 return 0;
69901diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
69902index 8313ca3..8a37d08 100644
69903--- a/fs/ext4/mmp.c
69904+++ b/fs/ext4/mmp.c
69905@@ -111,7 +111,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
69906 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
69907 const char *function, unsigned int line, const char *msg)
69908 {
69909- __ext4_warning(sb, function, line, msg);
69910+ __ext4_warning(sb, function, line, "%s", msg);
69911 __ext4_warning(sb, function, line,
69912 "MMP failure info: last update time: %llu, last update "
69913 "node: %s, last update device: %s\n",
69914diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
69915index 8a8ec62..1b02de5 100644
69916--- a/fs/ext4/resize.c
69917+++ b/fs/ext4/resize.c
69918@@ -413,7 +413,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
69919
69920 ext4_debug("mark blocks [%llu/%u] used\n", block, count);
69921 for (count2 = count; count > 0; count -= count2, block += count2) {
69922- ext4_fsblk_t start;
69923+ ext4_fsblk_t start, diff;
69924 struct buffer_head *bh;
69925 ext4_group_t group;
69926 int err;
69927@@ -422,10 +422,6 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
69928 start = ext4_group_first_block_no(sb, group);
69929 group -= flex_gd->groups[0].group;
69930
69931- count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
69932- if (count2 > count)
69933- count2 = count;
69934-
69935 if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
69936 BUG_ON(flex_gd->count > 1);
69937 continue;
69938@@ -443,9 +439,15 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
69939 err = ext4_journal_get_write_access(handle, bh);
69940 if (err)
69941 return err;
69942+
69943+ diff = block - start;
69944+ count2 = EXT4_BLOCKS_PER_GROUP(sb) - diff;
69945+ if (count2 > count)
69946+ count2 = count;
69947+
69948 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
69949- block - start, count2);
69950- ext4_set_bits(bh->b_data, block - start, count2);
69951+ diff, count2);
69952+ ext4_set_bits(bh->b_data, diff, count2);
69953
69954 err = ext4_handle_dirty_metadata(handle, NULL, bh);
69955 if (unlikely(err))
69956diff --git a/fs/ext4/super.c b/fs/ext4/super.c
69957index e061e66..87bc092 100644
69958--- a/fs/ext4/super.c
69959+++ b/fs/ext4/super.c
69960@@ -1243,7 +1243,7 @@ static ext4_fsblk_t get_sb_block(void **data)
69961 }
69962
69963 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
69964-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
69965+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
69966 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
69967
69968 #ifdef CONFIG_QUOTA
69969@@ -2443,7 +2443,7 @@ struct ext4_attr {
69970 int offset;
69971 int deprecated_val;
69972 } u;
69973-};
69974+} __do_const;
69975
69976 static int parse_strtoull(const char *buf,
69977 unsigned long long max, unsigned long long *value)
69978diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
69979index 1e09fc7..0400dd4 100644
69980--- a/fs/ext4/xattr.c
69981+++ b/fs/ext4/xattr.c
69982@@ -399,7 +399,7 @@ static int
69983 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
69984 char *buffer, size_t buffer_size)
69985 {
69986- size_t rest = buffer_size;
69987+ size_t rest = buffer_size, total_size = 0;
69988
69989 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
69990 const struct xattr_handler *handler =
69991@@ -416,9 +416,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
69992 buffer += size;
69993 }
69994 rest -= size;
69995+ total_size += size;
69996 }
69997 }
69998- return buffer_size - rest;
69999+ return total_size;
70000 }
70001
70002 static int
70003diff --git a/fs/fcntl.c b/fs/fcntl.c
70004index ee85cd4..9dd0d20 100644
70005--- a/fs/fcntl.c
70006+++ b/fs/fcntl.c
70007@@ -102,6 +102,10 @@ void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
70008 int force)
70009 {
70010 security_file_set_fowner(filp);
70011+ if (gr_handle_chroot_fowner(pid, type))
70012+ return;
70013+ if (gr_check_protected_task_fowner(pid, type))
70014+ return;
70015 f_modown(filp, pid, type, force);
70016 }
70017 EXPORT_SYMBOL(__f_setown);
70018diff --git a/fs/fhandle.c b/fs/fhandle.c
70019index d59712d..2281df9 100644
70020--- a/fs/fhandle.c
70021+++ b/fs/fhandle.c
70022@@ -8,6 +8,7 @@
70023 #include <linux/fs_struct.h>
70024 #include <linux/fsnotify.h>
70025 #include <linux/personality.h>
70026+#include <linux/grsecurity.h>
70027 #include <asm/uaccess.h>
70028 #include "internal.h"
70029 #include "mount.h"
70030@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
70031 } else
70032 retval = 0;
70033 /* copy the mount id */
70034- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
70035- sizeof(*mnt_id)) ||
70036+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
70037 copy_to_user(ufh, handle,
70038 sizeof(struct file_handle) + handle_bytes))
70039 retval = -EFAULT;
70040@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
70041 * the directory. Ideally we would like CAP_DAC_SEARCH.
70042 * But we don't have that
70043 */
70044- if (!capable(CAP_DAC_READ_SEARCH)) {
70045+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
70046 retval = -EPERM;
70047 goto out_err;
70048 }
70049diff --git a/fs/file.c b/fs/file.c
70050index ee738ea..f6c15629 100644
70051--- a/fs/file.c
70052+++ b/fs/file.c
70053@@ -16,6 +16,7 @@
70054 #include <linux/slab.h>
70055 #include <linux/vmalloc.h>
70056 #include <linux/file.h>
70057+#include <linux/security.h>
70058 #include <linux/fdtable.h>
70059 #include <linux/bitops.h>
70060 #include <linux/interrupt.h>
70061@@ -139,7 +140,7 @@ out:
70062 * Return <0 error code on error; 1 on successful completion.
70063 * The files->file_lock should be held on entry, and will be held on exit.
70064 */
70065-static int expand_fdtable(struct files_struct *files, int nr)
70066+static int expand_fdtable(struct files_struct *files, unsigned int nr)
70067 __releases(files->file_lock)
70068 __acquires(files->file_lock)
70069 {
70070@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
70071 * expanded and execution may have blocked.
70072 * The files->file_lock should be held on entry, and will be held on exit.
70073 */
70074-static int expand_files(struct files_struct *files, int nr)
70075+static int expand_files(struct files_struct *files, unsigned int nr)
70076 {
70077 struct fdtable *fdt;
70078
70079@@ -800,6 +801,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
70080 if (!file)
70081 return __close_fd(files, fd);
70082
70083+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
70084 if (fd >= rlimit(RLIMIT_NOFILE))
70085 return -EBADF;
70086
70087@@ -826,6 +828,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
70088 if (unlikely(oldfd == newfd))
70089 return -EINVAL;
70090
70091+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
70092 if (newfd >= rlimit(RLIMIT_NOFILE))
70093 return -EBADF;
70094
70095@@ -881,6 +884,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
70096 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
70097 {
70098 int err;
70099+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
70100 if (from >= rlimit(RLIMIT_NOFILE))
70101 return -EINVAL;
70102 err = alloc_fd(from, flags);
70103diff --git a/fs/filesystems.c b/fs/filesystems.c
70104index 5797d45..7d7d79a 100644
70105--- a/fs/filesystems.c
70106+++ b/fs/filesystems.c
70107@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
70108 int len = dot ? dot - name : strlen(name);
70109
70110 fs = __get_fs_type(name, len);
70111+#ifdef CONFIG_GRKERNSEC_MODHARDEN
70112+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
70113+#else
70114 if (!fs && (request_module("fs-%.*s", len, name) == 0))
70115+#endif
70116 fs = __get_fs_type(name, len);
70117
70118 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
70119diff --git a/fs/fs_struct.c b/fs/fs_struct.c
70120index 7dca743..1ff87ae 100644
70121--- a/fs/fs_struct.c
70122+++ b/fs/fs_struct.c
70123@@ -4,6 +4,7 @@
70124 #include <linux/path.h>
70125 #include <linux/slab.h>
70126 #include <linux/fs_struct.h>
70127+#include <linux/grsecurity.h>
70128 #include "internal.h"
70129
70130 /*
70131@@ -15,14 +16,18 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
70132 struct path old_root;
70133
70134 path_get(path);
70135+ gr_inc_chroot_refcnts(path->dentry, path->mnt);
70136 spin_lock(&fs->lock);
70137 write_seqcount_begin(&fs->seq);
70138 old_root = fs->root;
70139 fs->root = *path;
70140+ gr_set_chroot_entries(current, path);
70141 write_seqcount_end(&fs->seq);
70142 spin_unlock(&fs->lock);
70143- if (old_root.dentry)
70144+ if (old_root.dentry) {
70145+ gr_dec_chroot_refcnts(old_root.dentry, old_root.mnt);
70146 path_put(&old_root);
70147+ }
70148 }
70149
70150 /*
70151@@ -67,6 +72,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
70152 int hits = 0;
70153 spin_lock(&fs->lock);
70154 write_seqcount_begin(&fs->seq);
70155+ /* this root replacement is only done by pivot_root,
70156+ leave grsec's chroot tagging alone for this task
70157+ so that a pivoted root isn't treated as a chroot
70158+ */
70159 hits += replace_path(&fs->root, old_root, new_root);
70160 hits += replace_path(&fs->pwd, old_root, new_root);
70161 write_seqcount_end(&fs->seq);
70162@@ -85,6 +94,7 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
70163
70164 void free_fs_struct(struct fs_struct *fs)
70165 {
70166+ gr_dec_chroot_refcnts(fs->root.dentry, fs->root.mnt);
70167 path_put(&fs->root);
70168 path_put(&fs->pwd);
70169 kmem_cache_free(fs_cachep, fs);
70170@@ -99,7 +109,8 @@ void exit_fs(struct task_struct *tsk)
70171 task_lock(tsk);
70172 spin_lock(&fs->lock);
70173 tsk->fs = NULL;
70174- kill = !--fs->users;
70175+ gr_clear_chroot_entries(tsk);
70176+ kill = !atomic_dec_return(&fs->users);
70177 spin_unlock(&fs->lock);
70178 task_unlock(tsk);
70179 if (kill)
70180@@ -112,7 +123,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
70181 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
70182 /* We don't need to lock fs - think why ;-) */
70183 if (fs) {
70184- fs->users = 1;
70185+ atomic_set(&fs->users, 1);
70186 fs->in_exec = 0;
70187 spin_lock_init(&fs->lock);
70188 seqcount_init(&fs->seq);
70189@@ -121,9 +132,13 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
70190 spin_lock(&old->lock);
70191 fs->root = old->root;
70192 path_get(&fs->root);
70193+ /* instead of calling gr_set_chroot_entries here,
70194+ we call it from every caller of this function
70195+ */
70196 fs->pwd = old->pwd;
70197 path_get(&fs->pwd);
70198 spin_unlock(&old->lock);
70199+ gr_inc_chroot_refcnts(fs->root.dentry, fs->root.mnt);
70200 }
70201 return fs;
70202 }
70203@@ -139,8 +154,9 @@ int unshare_fs_struct(void)
70204
70205 task_lock(current);
70206 spin_lock(&fs->lock);
70207- kill = !--fs->users;
70208+ kill = !atomic_dec_return(&fs->users);
70209 current->fs = new_fs;
70210+ gr_set_chroot_entries(current, &new_fs->root);
70211 spin_unlock(&fs->lock);
70212 task_unlock(current);
70213
70214@@ -153,13 +169,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
70215
70216 int current_umask(void)
70217 {
70218- return current->fs->umask;
70219+ return current->fs->umask | gr_acl_umask();
70220 }
70221 EXPORT_SYMBOL(current_umask);
70222
70223 /* to be mentioned only in INIT_TASK */
70224 struct fs_struct init_fs = {
70225- .users = 1,
70226+ .users = ATOMIC_INIT(1),
70227 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
70228 .seq = SEQCNT_ZERO(init_fs.seq),
70229 .umask = 0022,
70230diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
70231index 89acec7..a575262 100644
70232--- a/fs/fscache/cookie.c
70233+++ b/fs/fscache/cookie.c
70234@@ -19,7 +19,7 @@
70235
70236 struct kmem_cache *fscache_cookie_jar;
70237
70238-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
70239+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
70240
70241 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
70242 static int fscache_alloc_object(struct fscache_cache *cache,
70243@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
70244 parent ? (char *) parent->def->name : "<no-parent>",
70245 def->name, netfs_data, enable);
70246
70247- fscache_stat(&fscache_n_acquires);
70248+ fscache_stat_unchecked(&fscache_n_acquires);
70249
70250 /* if there's no parent cookie, then we don't create one here either */
70251 if (!parent) {
70252- fscache_stat(&fscache_n_acquires_null);
70253+ fscache_stat_unchecked(&fscache_n_acquires_null);
70254 _leave(" [no parent]");
70255 return NULL;
70256 }
70257@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
70258 /* allocate and initialise a cookie */
70259 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
70260 if (!cookie) {
70261- fscache_stat(&fscache_n_acquires_oom);
70262+ fscache_stat_unchecked(&fscache_n_acquires_oom);
70263 _leave(" [ENOMEM]");
70264 return NULL;
70265 }
70266@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
70267
70268 switch (cookie->def->type) {
70269 case FSCACHE_COOKIE_TYPE_INDEX:
70270- fscache_stat(&fscache_n_cookie_index);
70271+ fscache_stat_unchecked(&fscache_n_cookie_index);
70272 break;
70273 case FSCACHE_COOKIE_TYPE_DATAFILE:
70274- fscache_stat(&fscache_n_cookie_data);
70275+ fscache_stat_unchecked(&fscache_n_cookie_data);
70276 break;
70277 default:
70278- fscache_stat(&fscache_n_cookie_special);
70279+ fscache_stat_unchecked(&fscache_n_cookie_special);
70280 break;
70281 }
70282
70283@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
70284 } else {
70285 atomic_dec(&parent->n_children);
70286 __fscache_cookie_put(cookie);
70287- fscache_stat(&fscache_n_acquires_nobufs);
70288+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
70289 _leave(" = NULL");
70290 return NULL;
70291 }
70292@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
70293 }
70294 }
70295
70296- fscache_stat(&fscache_n_acquires_ok);
70297+ fscache_stat_unchecked(&fscache_n_acquires_ok);
70298 _leave(" = %p", cookie);
70299 return cookie;
70300 }
70301@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
70302 cache = fscache_select_cache_for_object(cookie->parent);
70303 if (!cache) {
70304 up_read(&fscache_addremove_sem);
70305- fscache_stat(&fscache_n_acquires_no_cache);
70306+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
70307 _leave(" = -ENOMEDIUM [no cache]");
70308 return -ENOMEDIUM;
70309 }
70310@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
70311 object = cache->ops->alloc_object(cache, cookie);
70312 fscache_stat_d(&fscache_n_cop_alloc_object);
70313 if (IS_ERR(object)) {
70314- fscache_stat(&fscache_n_object_no_alloc);
70315+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
70316 ret = PTR_ERR(object);
70317 goto error;
70318 }
70319
70320- fscache_stat(&fscache_n_object_alloc);
70321+ fscache_stat_unchecked(&fscache_n_object_alloc);
70322
70323- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
70324+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
70325
70326 _debug("ALLOC OBJ%x: %s {%lx}",
70327 object->debug_id, cookie->def->name, object->events);
70328@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
70329
70330 _enter("{%s}", cookie->def->name);
70331
70332- fscache_stat(&fscache_n_invalidates);
70333+ fscache_stat_unchecked(&fscache_n_invalidates);
70334
70335 /* Only permit invalidation of data files. Invalidating an index will
70336 * require the caller to release all its attachments to the tree rooted
70337@@ -476,10 +476,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
70338 {
70339 struct fscache_object *object;
70340
70341- fscache_stat(&fscache_n_updates);
70342+ fscache_stat_unchecked(&fscache_n_updates);
70343
70344 if (!cookie) {
70345- fscache_stat(&fscache_n_updates_null);
70346+ fscache_stat_unchecked(&fscache_n_updates_null);
70347 _leave(" [no cookie]");
70348 return;
70349 }
70350@@ -580,12 +580,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
70351 */
70352 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
70353 {
70354- fscache_stat(&fscache_n_relinquishes);
70355+ fscache_stat_unchecked(&fscache_n_relinquishes);
70356 if (retire)
70357- fscache_stat(&fscache_n_relinquishes_retire);
70358+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
70359
70360 if (!cookie) {
70361- fscache_stat(&fscache_n_relinquishes_null);
70362+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
70363 _leave(" [no cookie]");
70364 return;
70365 }
70366@@ -686,7 +686,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
70367 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
70368 goto inconsistent;
70369
70370- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
70371+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
70372
70373 __fscache_use_cookie(cookie);
70374 if (fscache_submit_op(object, op) < 0)
70375diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
70376index 7872a62..d91b19f 100644
70377--- a/fs/fscache/internal.h
70378+++ b/fs/fscache/internal.h
70379@@ -137,8 +137,8 @@ extern void fscache_operation_gc(struct work_struct *);
70380 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
70381 extern int fscache_wait_for_operation_activation(struct fscache_object *,
70382 struct fscache_operation *,
70383- atomic_t *,
70384- atomic_t *,
70385+ atomic_unchecked_t *,
70386+ atomic_unchecked_t *,
70387 void (*)(struct fscache_operation *));
70388 extern void fscache_invalidate_writes(struct fscache_cookie *);
70389
70390@@ -157,101 +157,101 @@ extern void fscache_proc_cleanup(void);
70391 * stats.c
70392 */
70393 #ifdef CONFIG_FSCACHE_STATS
70394-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
70395-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
70396+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
70397+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
70398
70399-extern atomic_t fscache_n_op_pend;
70400-extern atomic_t fscache_n_op_run;
70401-extern atomic_t fscache_n_op_enqueue;
70402-extern atomic_t fscache_n_op_deferred_release;
70403-extern atomic_t fscache_n_op_release;
70404-extern atomic_t fscache_n_op_gc;
70405-extern atomic_t fscache_n_op_cancelled;
70406-extern atomic_t fscache_n_op_rejected;
70407+extern atomic_unchecked_t fscache_n_op_pend;
70408+extern atomic_unchecked_t fscache_n_op_run;
70409+extern atomic_unchecked_t fscache_n_op_enqueue;
70410+extern atomic_unchecked_t fscache_n_op_deferred_release;
70411+extern atomic_unchecked_t fscache_n_op_release;
70412+extern atomic_unchecked_t fscache_n_op_gc;
70413+extern atomic_unchecked_t fscache_n_op_cancelled;
70414+extern atomic_unchecked_t fscache_n_op_rejected;
70415
70416-extern atomic_t fscache_n_attr_changed;
70417-extern atomic_t fscache_n_attr_changed_ok;
70418-extern atomic_t fscache_n_attr_changed_nobufs;
70419-extern atomic_t fscache_n_attr_changed_nomem;
70420-extern atomic_t fscache_n_attr_changed_calls;
70421+extern atomic_unchecked_t fscache_n_attr_changed;
70422+extern atomic_unchecked_t fscache_n_attr_changed_ok;
70423+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
70424+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
70425+extern atomic_unchecked_t fscache_n_attr_changed_calls;
70426
70427-extern atomic_t fscache_n_allocs;
70428-extern atomic_t fscache_n_allocs_ok;
70429-extern atomic_t fscache_n_allocs_wait;
70430-extern atomic_t fscache_n_allocs_nobufs;
70431-extern atomic_t fscache_n_allocs_intr;
70432-extern atomic_t fscache_n_allocs_object_dead;
70433-extern atomic_t fscache_n_alloc_ops;
70434-extern atomic_t fscache_n_alloc_op_waits;
70435+extern atomic_unchecked_t fscache_n_allocs;
70436+extern atomic_unchecked_t fscache_n_allocs_ok;
70437+extern atomic_unchecked_t fscache_n_allocs_wait;
70438+extern atomic_unchecked_t fscache_n_allocs_nobufs;
70439+extern atomic_unchecked_t fscache_n_allocs_intr;
70440+extern atomic_unchecked_t fscache_n_allocs_object_dead;
70441+extern atomic_unchecked_t fscache_n_alloc_ops;
70442+extern atomic_unchecked_t fscache_n_alloc_op_waits;
70443
70444-extern atomic_t fscache_n_retrievals;
70445-extern atomic_t fscache_n_retrievals_ok;
70446-extern atomic_t fscache_n_retrievals_wait;
70447-extern atomic_t fscache_n_retrievals_nodata;
70448-extern atomic_t fscache_n_retrievals_nobufs;
70449-extern atomic_t fscache_n_retrievals_intr;
70450-extern atomic_t fscache_n_retrievals_nomem;
70451-extern atomic_t fscache_n_retrievals_object_dead;
70452-extern atomic_t fscache_n_retrieval_ops;
70453-extern atomic_t fscache_n_retrieval_op_waits;
70454+extern atomic_unchecked_t fscache_n_retrievals;
70455+extern atomic_unchecked_t fscache_n_retrievals_ok;
70456+extern atomic_unchecked_t fscache_n_retrievals_wait;
70457+extern atomic_unchecked_t fscache_n_retrievals_nodata;
70458+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
70459+extern atomic_unchecked_t fscache_n_retrievals_intr;
70460+extern atomic_unchecked_t fscache_n_retrievals_nomem;
70461+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
70462+extern atomic_unchecked_t fscache_n_retrieval_ops;
70463+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
70464
70465-extern atomic_t fscache_n_stores;
70466-extern atomic_t fscache_n_stores_ok;
70467-extern atomic_t fscache_n_stores_again;
70468-extern atomic_t fscache_n_stores_nobufs;
70469-extern atomic_t fscache_n_stores_oom;
70470-extern atomic_t fscache_n_store_ops;
70471-extern atomic_t fscache_n_store_calls;
70472-extern atomic_t fscache_n_store_pages;
70473-extern atomic_t fscache_n_store_radix_deletes;
70474-extern atomic_t fscache_n_store_pages_over_limit;
70475+extern atomic_unchecked_t fscache_n_stores;
70476+extern atomic_unchecked_t fscache_n_stores_ok;
70477+extern atomic_unchecked_t fscache_n_stores_again;
70478+extern atomic_unchecked_t fscache_n_stores_nobufs;
70479+extern atomic_unchecked_t fscache_n_stores_oom;
70480+extern atomic_unchecked_t fscache_n_store_ops;
70481+extern atomic_unchecked_t fscache_n_store_calls;
70482+extern atomic_unchecked_t fscache_n_store_pages;
70483+extern atomic_unchecked_t fscache_n_store_radix_deletes;
70484+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
70485
70486-extern atomic_t fscache_n_store_vmscan_not_storing;
70487-extern atomic_t fscache_n_store_vmscan_gone;
70488-extern atomic_t fscache_n_store_vmscan_busy;
70489-extern atomic_t fscache_n_store_vmscan_cancelled;
70490-extern atomic_t fscache_n_store_vmscan_wait;
70491+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
70492+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
70493+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
70494+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
70495+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
70496
70497-extern atomic_t fscache_n_marks;
70498-extern atomic_t fscache_n_uncaches;
70499+extern atomic_unchecked_t fscache_n_marks;
70500+extern atomic_unchecked_t fscache_n_uncaches;
70501
70502-extern atomic_t fscache_n_acquires;
70503-extern atomic_t fscache_n_acquires_null;
70504-extern atomic_t fscache_n_acquires_no_cache;
70505-extern atomic_t fscache_n_acquires_ok;
70506-extern atomic_t fscache_n_acquires_nobufs;
70507-extern atomic_t fscache_n_acquires_oom;
70508+extern atomic_unchecked_t fscache_n_acquires;
70509+extern atomic_unchecked_t fscache_n_acquires_null;
70510+extern atomic_unchecked_t fscache_n_acquires_no_cache;
70511+extern atomic_unchecked_t fscache_n_acquires_ok;
70512+extern atomic_unchecked_t fscache_n_acquires_nobufs;
70513+extern atomic_unchecked_t fscache_n_acquires_oom;
70514
70515-extern atomic_t fscache_n_invalidates;
70516-extern atomic_t fscache_n_invalidates_run;
70517+extern atomic_unchecked_t fscache_n_invalidates;
70518+extern atomic_unchecked_t fscache_n_invalidates_run;
70519
70520-extern atomic_t fscache_n_updates;
70521-extern atomic_t fscache_n_updates_null;
70522-extern atomic_t fscache_n_updates_run;
70523+extern atomic_unchecked_t fscache_n_updates;
70524+extern atomic_unchecked_t fscache_n_updates_null;
70525+extern atomic_unchecked_t fscache_n_updates_run;
70526
70527-extern atomic_t fscache_n_relinquishes;
70528-extern atomic_t fscache_n_relinquishes_null;
70529-extern atomic_t fscache_n_relinquishes_waitcrt;
70530-extern atomic_t fscache_n_relinquishes_retire;
70531+extern atomic_unchecked_t fscache_n_relinquishes;
70532+extern atomic_unchecked_t fscache_n_relinquishes_null;
70533+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
70534+extern atomic_unchecked_t fscache_n_relinquishes_retire;
70535
70536-extern atomic_t fscache_n_cookie_index;
70537-extern atomic_t fscache_n_cookie_data;
70538-extern atomic_t fscache_n_cookie_special;
70539+extern atomic_unchecked_t fscache_n_cookie_index;
70540+extern atomic_unchecked_t fscache_n_cookie_data;
70541+extern atomic_unchecked_t fscache_n_cookie_special;
70542
70543-extern atomic_t fscache_n_object_alloc;
70544-extern atomic_t fscache_n_object_no_alloc;
70545-extern atomic_t fscache_n_object_lookups;
70546-extern atomic_t fscache_n_object_lookups_negative;
70547-extern atomic_t fscache_n_object_lookups_positive;
70548-extern atomic_t fscache_n_object_lookups_timed_out;
70549-extern atomic_t fscache_n_object_created;
70550-extern atomic_t fscache_n_object_avail;
70551-extern atomic_t fscache_n_object_dead;
70552+extern atomic_unchecked_t fscache_n_object_alloc;
70553+extern atomic_unchecked_t fscache_n_object_no_alloc;
70554+extern atomic_unchecked_t fscache_n_object_lookups;
70555+extern atomic_unchecked_t fscache_n_object_lookups_negative;
70556+extern atomic_unchecked_t fscache_n_object_lookups_positive;
70557+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
70558+extern atomic_unchecked_t fscache_n_object_created;
70559+extern atomic_unchecked_t fscache_n_object_avail;
70560+extern atomic_unchecked_t fscache_n_object_dead;
70561
70562-extern atomic_t fscache_n_checkaux_none;
70563-extern atomic_t fscache_n_checkaux_okay;
70564-extern atomic_t fscache_n_checkaux_update;
70565-extern atomic_t fscache_n_checkaux_obsolete;
70566+extern atomic_unchecked_t fscache_n_checkaux_none;
70567+extern atomic_unchecked_t fscache_n_checkaux_okay;
70568+extern atomic_unchecked_t fscache_n_checkaux_update;
70569+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
70570
70571 extern atomic_t fscache_n_cop_alloc_object;
70572 extern atomic_t fscache_n_cop_lookup_object;
70573@@ -276,6 +276,11 @@ static inline void fscache_stat(atomic_t *stat)
70574 atomic_inc(stat);
70575 }
70576
70577+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
70578+{
70579+ atomic_inc_unchecked(stat);
70580+}
70581+
70582 static inline void fscache_stat_d(atomic_t *stat)
70583 {
70584 atomic_dec(stat);
70585@@ -288,6 +293,7 @@ extern const struct file_operations fscache_stats_fops;
70586
70587 #define __fscache_stat(stat) (NULL)
70588 #define fscache_stat(stat) do {} while (0)
70589+#define fscache_stat_unchecked(stat) do {} while (0)
70590 #define fscache_stat_d(stat) do {} while (0)
70591 #endif
70592
70593diff --git a/fs/fscache/object.c b/fs/fscache/object.c
70594index da032da..0076ce7 100644
70595--- a/fs/fscache/object.c
70596+++ b/fs/fscache/object.c
70597@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
70598 _debug("LOOKUP \"%s\" in \"%s\"",
70599 cookie->def->name, object->cache->tag->name);
70600
70601- fscache_stat(&fscache_n_object_lookups);
70602+ fscache_stat_unchecked(&fscache_n_object_lookups);
70603 fscache_stat(&fscache_n_cop_lookup_object);
70604 ret = object->cache->ops->lookup_object(object);
70605 fscache_stat_d(&fscache_n_cop_lookup_object);
70606@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
70607 if (ret == -ETIMEDOUT) {
70608 /* probably stuck behind another object, so move this one to
70609 * the back of the queue */
70610- fscache_stat(&fscache_n_object_lookups_timed_out);
70611+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
70612 _leave(" [timeout]");
70613 return NO_TRANSIT;
70614 }
70615@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
70616 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
70617
70618 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
70619- fscache_stat(&fscache_n_object_lookups_negative);
70620+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
70621
70622 /* Allow write requests to begin stacking up and read requests to begin
70623 * returning ENODATA.
70624@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
70625 /* if we were still looking up, then we must have a positive lookup
70626 * result, in which case there may be data available */
70627 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
70628- fscache_stat(&fscache_n_object_lookups_positive);
70629+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
70630
70631 /* We do (presumably) have data */
70632 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
70633@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
70634 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
70635 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
70636 } else {
70637- fscache_stat(&fscache_n_object_created);
70638+ fscache_stat_unchecked(&fscache_n_object_created);
70639 }
70640
70641 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
70642@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
70643 fscache_stat_d(&fscache_n_cop_lookup_complete);
70644
70645 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
70646- fscache_stat(&fscache_n_object_avail);
70647+ fscache_stat_unchecked(&fscache_n_object_avail);
70648
70649 _leave("");
70650 return transit_to(JUMPSTART_DEPS);
70651@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
70652
70653 /* this just shifts the object release to the work processor */
70654 fscache_put_object(object);
70655- fscache_stat(&fscache_n_object_dead);
70656+ fscache_stat_unchecked(&fscache_n_object_dead);
70657
70658 _leave("");
70659 return transit_to(OBJECT_DEAD);
70660@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
70661 enum fscache_checkaux result;
70662
70663 if (!object->cookie->def->check_aux) {
70664- fscache_stat(&fscache_n_checkaux_none);
70665+ fscache_stat_unchecked(&fscache_n_checkaux_none);
70666 return FSCACHE_CHECKAUX_OKAY;
70667 }
70668
70669@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
70670 switch (result) {
70671 /* entry okay as is */
70672 case FSCACHE_CHECKAUX_OKAY:
70673- fscache_stat(&fscache_n_checkaux_okay);
70674+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
70675 break;
70676
70677 /* entry requires update */
70678 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
70679- fscache_stat(&fscache_n_checkaux_update);
70680+ fscache_stat_unchecked(&fscache_n_checkaux_update);
70681 break;
70682
70683 /* entry requires deletion */
70684 case FSCACHE_CHECKAUX_OBSOLETE:
70685- fscache_stat(&fscache_n_checkaux_obsolete);
70686+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
70687 break;
70688
70689 default:
70690@@ -993,7 +993,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
70691 {
70692 const struct fscache_state *s;
70693
70694- fscache_stat(&fscache_n_invalidates_run);
70695+ fscache_stat_unchecked(&fscache_n_invalidates_run);
70696 fscache_stat(&fscache_n_cop_invalidate_object);
70697 s = _fscache_invalidate_object(object, event);
70698 fscache_stat_d(&fscache_n_cop_invalidate_object);
70699@@ -1008,7 +1008,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
70700 {
70701 _enter("{OBJ%x},%d", object->debug_id, event);
70702
70703- fscache_stat(&fscache_n_updates_run);
70704+ fscache_stat_unchecked(&fscache_n_updates_run);
70705 fscache_stat(&fscache_n_cop_update_object);
70706 object->cache->ops->update_object(object);
70707 fscache_stat_d(&fscache_n_cop_update_object);
70708diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
70709index e7b87a0..a85d47a 100644
70710--- a/fs/fscache/operation.c
70711+++ b/fs/fscache/operation.c
70712@@ -17,7 +17,7 @@
70713 #include <linux/slab.h>
70714 #include "internal.h"
70715
70716-atomic_t fscache_op_debug_id;
70717+atomic_unchecked_t fscache_op_debug_id;
70718 EXPORT_SYMBOL(fscache_op_debug_id);
70719
70720 /**
70721@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
70722 ASSERTCMP(atomic_read(&op->usage), >, 0);
70723 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
70724
70725- fscache_stat(&fscache_n_op_enqueue);
70726+ fscache_stat_unchecked(&fscache_n_op_enqueue);
70727 switch (op->flags & FSCACHE_OP_TYPE) {
70728 case FSCACHE_OP_ASYNC:
70729 _debug("queue async");
70730@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
70731 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
70732 if (op->processor)
70733 fscache_enqueue_operation(op);
70734- fscache_stat(&fscache_n_op_run);
70735+ fscache_stat_unchecked(&fscache_n_op_run);
70736 }
70737
70738 /*
70739@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
70740 if (object->n_in_progress > 0) {
70741 atomic_inc(&op->usage);
70742 list_add_tail(&op->pend_link, &object->pending_ops);
70743- fscache_stat(&fscache_n_op_pend);
70744+ fscache_stat_unchecked(&fscache_n_op_pend);
70745 } else if (!list_empty(&object->pending_ops)) {
70746 atomic_inc(&op->usage);
70747 list_add_tail(&op->pend_link, &object->pending_ops);
70748- fscache_stat(&fscache_n_op_pend);
70749+ fscache_stat_unchecked(&fscache_n_op_pend);
70750 fscache_start_operations(object);
70751 } else {
70752 ASSERTCMP(object->n_in_progress, ==, 0);
70753@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
70754 object->n_exclusive++; /* reads and writes must wait */
70755 atomic_inc(&op->usage);
70756 list_add_tail(&op->pend_link, &object->pending_ops);
70757- fscache_stat(&fscache_n_op_pend);
70758+ fscache_stat_unchecked(&fscache_n_op_pend);
70759 ret = 0;
70760 } else {
70761 /* If we're in any other state, there must have been an I/O
70762@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
70763 if (object->n_exclusive > 0) {
70764 atomic_inc(&op->usage);
70765 list_add_tail(&op->pend_link, &object->pending_ops);
70766- fscache_stat(&fscache_n_op_pend);
70767+ fscache_stat_unchecked(&fscache_n_op_pend);
70768 } else if (!list_empty(&object->pending_ops)) {
70769 atomic_inc(&op->usage);
70770 list_add_tail(&op->pend_link, &object->pending_ops);
70771- fscache_stat(&fscache_n_op_pend);
70772+ fscache_stat_unchecked(&fscache_n_op_pend);
70773 fscache_start_operations(object);
70774 } else {
70775 ASSERTCMP(object->n_exclusive, ==, 0);
70776@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
70777 object->n_ops++;
70778 atomic_inc(&op->usage);
70779 list_add_tail(&op->pend_link, &object->pending_ops);
70780- fscache_stat(&fscache_n_op_pend);
70781+ fscache_stat_unchecked(&fscache_n_op_pend);
70782 ret = 0;
70783 } else if (fscache_object_is_dying(object)) {
70784- fscache_stat(&fscache_n_op_rejected);
70785+ fscache_stat_unchecked(&fscache_n_op_rejected);
70786 op->state = FSCACHE_OP_ST_CANCELLED;
70787 ret = -ENOBUFS;
70788 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
70789@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
70790 ret = -EBUSY;
70791 if (op->state == FSCACHE_OP_ST_PENDING) {
70792 ASSERT(!list_empty(&op->pend_link));
70793- fscache_stat(&fscache_n_op_cancelled);
70794+ fscache_stat_unchecked(&fscache_n_op_cancelled);
70795 list_del_init(&op->pend_link);
70796 if (do_cancel)
70797 do_cancel(op);
70798@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
70799 while (!list_empty(&object->pending_ops)) {
70800 op = list_entry(object->pending_ops.next,
70801 struct fscache_operation, pend_link);
70802- fscache_stat(&fscache_n_op_cancelled);
70803+ fscache_stat_unchecked(&fscache_n_op_cancelled);
70804 list_del_init(&op->pend_link);
70805
70806 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
70807@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
70808 op->state, ==, FSCACHE_OP_ST_CANCELLED);
70809 op->state = FSCACHE_OP_ST_DEAD;
70810
70811- fscache_stat(&fscache_n_op_release);
70812+ fscache_stat_unchecked(&fscache_n_op_release);
70813
70814 if (op->release) {
70815 op->release(op);
70816@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
70817 * lock, and defer it otherwise */
70818 if (!spin_trylock(&object->lock)) {
70819 _debug("defer put");
70820- fscache_stat(&fscache_n_op_deferred_release);
70821+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
70822
70823 cache = object->cache;
70824 spin_lock(&cache->op_gc_list_lock);
70825@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
70826
70827 _debug("GC DEFERRED REL OBJ%x OP%x",
70828 object->debug_id, op->debug_id);
70829- fscache_stat(&fscache_n_op_gc);
70830+ fscache_stat_unchecked(&fscache_n_op_gc);
70831
70832 ASSERTCMP(atomic_read(&op->usage), ==, 0);
70833 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
70834diff --git a/fs/fscache/page.c b/fs/fscache/page.c
70835index de33b3f..8be4d29 100644
70836--- a/fs/fscache/page.c
70837+++ b/fs/fscache/page.c
70838@@ -74,7 +74,7 @@ try_again:
70839 val = radix_tree_lookup(&cookie->stores, page->index);
70840 if (!val) {
70841 rcu_read_unlock();
70842- fscache_stat(&fscache_n_store_vmscan_not_storing);
70843+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
70844 __fscache_uncache_page(cookie, page);
70845 return true;
70846 }
70847@@ -104,11 +104,11 @@ try_again:
70848 spin_unlock(&cookie->stores_lock);
70849
70850 if (xpage) {
70851- fscache_stat(&fscache_n_store_vmscan_cancelled);
70852- fscache_stat(&fscache_n_store_radix_deletes);
70853+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
70854+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
70855 ASSERTCMP(xpage, ==, page);
70856 } else {
70857- fscache_stat(&fscache_n_store_vmscan_gone);
70858+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
70859 }
70860
70861 wake_up_bit(&cookie->flags, 0);
70862@@ -123,11 +123,11 @@ page_busy:
70863 * sleeping on memory allocation, so we may need to impose a timeout
70864 * too. */
70865 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
70866- fscache_stat(&fscache_n_store_vmscan_busy);
70867+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
70868 return false;
70869 }
70870
70871- fscache_stat(&fscache_n_store_vmscan_wait);
70872+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
70873 if (!release_page_wait_timeout(cookie, page))
70874 _debug("fscache writeout timeout page: %p{%lx}",
70875 page, page->index);
70876@@ -156,7 +156,7 @@ static void fscache_end_page_write(struct fscache_object *object,
70877 FSCACHE_COOKIE_STORING_TAG);
70878 if (!radix_tree_tag_get(&cookie->stores, page->index,
70879 FSCACHE_COOKIE_PENDING_TAG)) {
70880- fscache_stat(&fscache_n_store_radix_deletes);
70881+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
70882 xpage = radix_tree_delete(&cookie->stores, page->index);
70883 }
70884 spin_unlock(&cookie->stores_lock);
70885@@ -177,7 +177,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
70886
70887 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
70888
70889- fscache_stat(&fscache_n_attr_changed_calls);
70890+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
70891
70892 if (fscache_object_is_active(object)) {
70893 fscache_stat(&fscache_n_cop_attr_changed);
70894@@ -204,11 +204,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
70895
70896 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
70897
70898- fscache_stat(&fscache_n_attr_changed);
70899+ fscache_stat_unchecked(&fscache_n_attr_changed);
70900
70901 op = kzalloc(sizeof(*op), GFP_KERNEL);
70902 if (!op) {
70903- fscache_stat(&fscache_n_attr_changed_nomem);
70904+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
70905 _leave(" = -ENOMEM");
70906 return -ENOMEM;
70907 }
70908@@ -230,7 +230,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
70909 if (fscache_submit_exclusive_op(object, op) < 0)
70910 goto nobufs_dec;
70911 spin_unlock(&cookie->lock);
70912- fscache_stat(&fscache_n_attr_changed_ok);
70913+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
70914 fscache_put_operation(op);
70915 _leave(" = 0");
70916 return 0;
70917@@ -242,7 +242,7 @@ nobufs:
70918 kfree(op);
70919 if (wake_cookie)
70920 __fscache_wake_unused_cookie(cookie);
70921- fscache_stat(&fscache_n_attr_changed_nobufs);
70922+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
70923 _leave(" = %d", -ENOBUFS);
70924 return -ENOBUFS;
70925 }
70926@@ -281,7 +281,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
70927 /* allocate a retrieval operation and attempt to submit it */
70928 op = kzalloc(sizeof(*op), GFP_NOIO);
70929 if (!op) {
70930- fscache_stat(&fscache_n_retrievals_nomem);
70931+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
70932 return NULL;
70933 }
70934
70935@@ -311,12 +311,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
70936 return 0;
70937 }
70938
70939- fscache_stat(&fscache_n_retrievals_wait);
70940+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
70941
70942 jif = jiffies;
70943 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
70944 TASK_INTERRUPTIBLE) != 0) {
70945- fscache_stat(&fscache_n_retrievals_intr);
70946+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
70947 _leave(" = -ERESTARTSYS");
70948 return -ERESTARTSYS;
70949 }
70950@@ -345,8 +345,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
70951 */
70952 int fscache_wait_for_operation_activation(struct fscache_object *object,
70953 struct fscache_operation *op,
70954- atomic_t *stat_op_waits,
70955- atomic_t *stat_object_dead,
70956+ atomic_unchecked_t *stat_op_waits,
70957+ atomic_unchecked_t *stat_object_dead,
70958 void (*do_cancel)(struct fscache_operation *))
70959 {
70960 int ret;
70961@@ -356,7 +356,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
70962
70963 _debug(">>> WT");
70964 if (stat_op_waits)
70965- fscache_stat(stat_op_waits);
70966+ fscache_stat_unchecked(stat_op_waits);
70967 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
70968 TASK_INTERRUPTIBLE) != 0) {
70969 ret = fscache_cancel_op(op, do_cancel);
70970@@ -373,7 +373,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
70971 check_if_dead:
70972 if (op->state == FSCACHE_OP_ST_CANCELLED) {
70973 if (stat_object_dead)
70974- fscache_stat(stat_object_dead);
70975+ fscache_stat_unchecked(stat_object_dead);
70976 _leave(" = -ENOBUFS [cancelled]");
70977 return -ENOBUFS;
70978 }
70979@@ -381,7 +381,7 @@ check_if_dead:
70980 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
70981 fscache_cancel_op(op, do_cancel);
70982 if (stat_object_dead)
70983- fscache_stat(stat_object_dead);
70984+ fscache_stat_unchecked(stat_object_dead);
70985 return -ENOBUFS;
70986 }
70987 return 0;
70988@@ -409,7 +409,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
70989
70990 _enter("%p,%p,,,", cookie, page);
70991
70992- fscache_stat(&fscache_n_retrievals);
70993+ fscache_stat_unchecked(&fscache_n_retrievals);
70994
70995 if (hlist_empty(&cookie->backing_objects))
70996 goto nobufs;
70997@@ -451,7 +451,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
70998 goto nobufs_unlock_dec;
70999 spin_unlock(&cookie->lock);
71000
71001- fscache_stat(&fscache_n_retrieval_ops);
71002+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
71003
71004 /* pin the netfs read context in case we need to do the actual netfs
71005 * read because we've encountered a cache read failure */
71006@@ -482,15 +482,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
71007
71008 error:
71009 if (ret == -ENOMEM)
71010- fscache_stat(&fscache_n_retrievals_nomem);
71011+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
71012 else if (ret == -ERESTARTSYS)
71013- fscache_stat(&fscache_n_retrievals_intr);
71014+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
71015 else if (ret == -ENODATA)
71016- fscache_stat(&fscache_n_retrievals_nodata);
71017+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
71018 else if (ret < 0)
71019- fscache_stat(&fscache_n_retrievals_nobufs);
71020+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
71021 else
71022- fscache_stat(&fscache_n_retrievals_ok);
71023+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
71024
71025 fscache_put_retrieval(op);
71026 _leave(" = %d", ret);
71027@@ -505,7 +505,7 @@ nobufs_unlock:
71028 __fscache_wake_unused_cookie(cookie);
71029 kfree(op);
71030 nobufs:
71031- fscache_stat(&fscache_n_retrievals_nobufs);
71032+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
71033 _leave(" = -ENOBUFS");
71034 return -ENOBUFS;
71035 }
71036@@ -544,7 +544,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
71037
71038 _enter("%p,,%d,,,", cookie, *nr_pages);
71039
71040- fscache_stat(&fscache_n_retrievals);
71041+ fscache_stat_unchecked(&fscache_n_retrievals);
71042
71043 if (hlist_empty(&cookie->backing_objects))
71044 goto nobufs;
71045@@ -582,7 +582,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
71046 goto nobufs_unlock_dec;
71047 spin_unlock(&cookie->lock);
71048
71049- fscache_stat(&fscache_n_retrieval_ops);
71050+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
71051
71052 /* pin the netfs read context in case we need to do the actual netfs
71053 * read because we've encountered a cache read failure */
71054@@ -613,15 +613,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
71055
71056 error:
71057 if (ret == -ENOMEM)
71058- fscache_stat(&fscache_n_retrievals_nomem);
71059+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
71060 else if (ret == -ERESTARTSYS)
71061- fscache_stat(&fscache_n_retrievals_intr);
71062+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
71063 else if (ret == -ENODATA)
71064- fscache_stat(&fscache_n_retrievals_nodata);
71065+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
71066 else if (ret < 0)
71067- fscache_stat(&fscache_n_retrievals_nobufs);
71068+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
71069 else
71070- fscache_stat(&fscache_n_retrievals_ok);
71071+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
71072
71073 fscache_put_retrieval(op);
71074 _leave(" = %d", ret);
71075@@ -636,7 +636,7 @@ nobufs_unlock:
71076 if (wake_cookie)
71077 __fscache_wake_unused_cookie(cookie);
71078 nobufs:
71079- fscache_stat(&fscache_n_retrievals_nobufs);
71080+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
71081 _leave(" = -ENOBUFS");
71082 return -ENOBUFS;
71083 }
71084@@ -661,7 +661,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
71085
71086 _enter("%p,%p,,,", cookie, page);
71087
71088- fscache_stat(&fscache_n_allocs);
71089+ fscache_stat_unchecked(&fscache_n_allocs);
71090
71091 if (hlist_empty(&cookie->backing_objects))
71092 goto nobufs;
71093@@ -695,7 +695,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
71094 goto nobufs_unlock_dec;
71095 spin_unlock(&cookie->lock);
71096
71097- fscache_stat(&fscache_n_alloc_ops);
71098+ fscache_stat_unchecked(&fscache_n_alloc_ops);
71099
71100 ret = fscache_wait_for_operation_activation(
71101 object, &op->op,
71102@@ -712,11 +712,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
71103
71104 error:
71105 if (ret == -ERESTARTSYS)
71106- fscache_stat(&fscache_n_allocs_intr);
71107+ fscache_stat_unchecked(&fscache_n_allocs_intr);
71108 else if (ret < 0)
71109- fscache_stat(&fscache_n_allocs_nobufs);
71110+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
71111 else
71112- fscache_stat(&fscache_n_allocs_ok);
71113+ fscache_stat_unchecked(&fscache_n_allocs_ok);
71114
71115 fscache_put_retrieval(op);
71116 _leave(" = %d", ret);
71117@@ -730,7 +730,7 @@ nobufs_unlock:
71118 if (wake_cookie)
71119 __fscache_wake_unused_cookie(cookie);
71120 nobufs:
71121- fscache_stat(&fscache_n_allocs_nobufs);
71122+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
71123 _leave(" = -ENOBUFS");
71124 return -ENOBUFS;
71125 }
71126@@ -806,7 +806,7 @@ static void fscache_write_op(struct fscache_operation *_op)
71127
71128 spin_lock(&cookie->stores_lock);
71129
71130- fscache_stat(&fscache_n_store_calls);
71131+ fscache_stat_unchecked(&fscache_n_store_calls);
71132
71133 /* find a page to store */
71134 page = NULL;
71135@@ -817,7 +817,7 @@ static void fscache_write_op(struct fscache_operation *_op)
71136 page = results[0];
71137 _debug("gang %d [%lx]", n, page->index);
71138 if (page->index > op->store_limit) {
71139- fscache_stat(&fscache_n_store_pages_over_limit);
71140+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
71141 goto superseded;
71142 }
71143
71144@@ -829,7 +829,7 @@ static void fscache_write_op(struct fscache_operation *_op)
71145 spin_unlock(&cookie->stores_lock);
71146 spin_unlock(&object->lock);
71147
71148- fscache_stat(&fscache_n_store_pages);
71149+ fscache_stat_unchecked(&fscache_n_store_pages);
71150 fscache_stat(&fscache_n_cop_write_page);
71151 ret = object->cache->ops->write_page(op, page);
71152 fscache_stat_d(&fscache_n_cop_write_page);
71153@@ -933,7 +933,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
71154 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
71155 ASSERT(PageFsCache(page));
71156
71157- fscache_stat(&fscache_n_stores);
71158+ fscache_stat_unchecked(&fscache_n_stores);
71159
71160 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
71161 _leave(" = -ENOBUFS [invalidating]");
71162@@ -992,7 +992,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
71163 spin_unlock(&cookie->stores_lock);
71164 spin_unlock(&object->lock);
71165
71166- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
71167+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
71168 op->store_limit = object->store_limit;
71169
71170 __fscache_use_cookie(cookie);
71171@@ -1001,8 +1001,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
71172
71173 spin_unlock(&cookie->lock);
71174 radix_tree_preload_end();
71175- fscache_stat(&fscache_n_store_ops);
71176- fscache_stat(&fscache_n_stores_ok);
71177+ fscache_stat_unchecked(&fscache_n_store_ops);
71178+ fscache_stat_unchecked(&fscache_n_stores_ok);
71179
71180 /* the work queue now carries its own ref on the object */
71181 fscache_put_operation(&op->op);
71182@@ -1010,14 +1010,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
71183 return 0;
71184
71185 already_queued:
71186- fscache_stat(&fscache_n_stores_again);
71187+ fscache_stat_unchecked(&fscache_n_stores_again);
71188 already_pending:
71189 spin_unlock(&cookie->stores_lock);
71190 spin_unlock(&object->lock);
71191 spin_unlock(&cookie->lock);
71192 radix_tree_preload_end();
71193 kfree(op);
71194- fscache_stat(&fscache_n_stores_ok);
71195+ fscache_stat_unchecked(&fscache_n_stores_ok);
71196 _leave(" = 0");
71197 return 0;
71198
71199@@ -1039,14 +1039,14 @@ nobufs:
71200 kfree(op);
71201 if (wake_cookie)
71202 __fscache_wake_unused_cookie(cookie);
71203- fscache_stat(&fscache_n_stores_nobufs);
71204+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
71205 _leave(" = -ENOBUFS");
71206 return -ENOBUFS;
71207
71208 nomem_free:
71209 kfree(op);
71210 nomem:
71211- fscache_stat(&fscache_n_stores_oom);
71212+ fscache_stat_unchecked(&fscache_n_stores_oom);
71213 _leave(" = -ENOMEM");
71214 return -ENOMEM;
71215 }
71216@@ -1064,7 +1064,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
71217 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
71218 ASSERTCMP(page, !=, NULL);
71219
71220- fscache_stat(&fscache_n_uncaches);
71221+ fscache_stat_unchecked(&fscache_n_uncaches);
71222
71223 /* cache withdrawal may beat us to it */
71224 if (!PageFsCache(page))
71225@@ -1115,7 +1115,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
71226 struct fscache_cookie *cookie = op->op.object->cookie;
71227
71228 #ifdef CONFIG_FSCACHE_STATS
71229- atomic_inc(&fscache_n_marks);
71230+ atomic_inc_unchecked(&fscache_n_marks);
71231 #endif
71232
71233 _debug("- mark %p{%lx}", page, page->index);
71234diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
71235index 40d13c7..ddf52b9 100644
71236--- a/fs/fscache/stats.c
71237+++ b/fs/fscache/stats.c
71238@@ -18,99 +18,99 @@
71239 /*
71240 * operation counters
71241 */
71242-atomic_t fscache_n_op_pend;
71243-atomic_t fscache_n_op_run;
71244-atomic_t fscache_n_op_enqueue;
71245-atomic_t fscache_n_op_requeue;
71246-atomic_t fscache_n_op_deferred_release;
71247-atomic_t fscache_n_op_release;
71248-atomic_t fscache_n_op_gc;
71249-atomic_t fscache_n_op_cancelled;
71250-atomic_t fscache_n_op_rejected;
71251+atomic_unchecked_t fscache_n_op_pend;
71252+atomic_unchecked_t fscache_n_op_run;
71253+atomic_unchecked_t fscache_n_op_enqueue;
71254+atomic_unchecked_t fscache_n_op_requeue;
71255+atomic_unchecked_t fscache_n_op_deferred_release;
71256+atomic_unchecked_t fscache_n_op_release;
71257+atomic_unchecked_t fscache_n_op_gc;
71258+atomic_unchecked_t fscache_n_op_cancelled;
71259+atomic_unchecked_t fscache_n_op_rejected;
71260
71261-atomic_t fscache_n_attr_changed;
71262-atomic_t fscache_n_attr_changed_ok;
71263-atomic_t fscache_n_attr_changed_nobufs;
71264-atomic_t fscache_n_attr_changed_nomem;
71265-atomic_t fscache_n_attr_changed_calls;
71266+atomic_unchecked_t fscache_n_attr_changed;
71267+atomic_unchecked_t fscache_n_attr_changed_ok;
71268+atomic_unchecked_t fscache_n_attr_changed_nobufs;
71269+atomic_unchecked_t fscache_n_attr_changed_nomem;
71270+atomic_unchecked_t fscache_n_attr_changed_calls;
71271
71272-atomic_t fscache_n_allocs;
71273-atomic_t fscache_n_allocs_ok;
71274-atomic_t fscache_n_allocs_wait;
71275-atomic_t fscache_n_allocs_nobufs;
71276-atomic_t fscache_n_allocs_intr;
71277-atomic_t fscache_n_allocs_object_dead;
71278-atomic_t fscache_n_alloc_ops;
71279-atomic_t fscache_n_alloc_op_waits;
71280+atomic_unchecked_t fscache_n_allocs;
71281+atomic_unchecked_t fscache_n_allocs_ok;
71282+atomic_unchecked_t fscache_n_allocs_wait;
71283+atomic_unchecked_t fscache_n_allocs_nobufs;
71284+atomic_unchecked_t fscache_n_allocs_intr;
71285+atomic_unchecked_t fscache_n_allocs_object_dead;
71286+atomic_unchecked_t fscache_n_alloc_ops;
71287+atomic_unchecked_t fscache_n_alloc_op_waits;
71288
71289-atomic_t fscache_n_retrievals;
71290-atomic_t fscache_n_retrievals_ok;
71291-atomic_t fscache_n_retrievals_wait;
71292-atomic_t fscache_n_retrievals_nodata;
71293-atomic_t fscache_n_retrievals_nobufs;
71294-atomic_t fscache_n_retrievals_intr;
71295-atomic_t fscache_n_retrievals_nomem;
71296-atomic_t fscache_n_retrievals_object_dead;
71297-atomic_t fscache_n_retrieval_ops;
71298-atomic_t fscache_n_retrieval_op_waits;
71299+atomic_unchecked_t fscache_n_retrievals;
71300+atomic_unchecked_t fscache_n_retrievals_ok;
71301+atomic_unchecked_t fscache_n_retrievals_wait;
71302+atomic_unchecked_t fscache_n_retrievals_nodata;
71303+atomic_unchecked_t fscache_n_retrievals_nobufs;
71304+atomic_unchecked_t fscache_n_retrievals_intr;
71305+atomic_unchecked_t fscache_n_retrievals_nomem;
71306+atomic_unchecked_t fscache_n_retrievals_object_dead;
71307+atomic_unchecked_t fscache_n_retrieval_ops;
71308+atomic_unchecked_t fscache_n_retrieval_op_waits;
71309
71310-atomic_t fscache_n_stores;
71311-atomic_t fscache_n_stores_ok;
71312-atomic_t fscache_n_stores_again;
71313-atomic_t fscache_n_stores_nobufs;
71314-atomic_t fscache_n_stores_oom;
71315-atomic_t fscache_n_store_ops;
71316-atomic_t fscache_n_store_calls;
71317-atomic_t fscache_n_store_pages;
71318-atomic_t fscache_n_store_radix_deletes;
71319-atomic_t fscache_n_store_pages_over_limit;
71320+atomic_unchecked_t fscache_n_stores;
71321+atomic_unchecked_t fscache_n_stores_ok;
71322+atomic_unchecked_t fscache_n_stores_again;
71323+atomic_unchecked_t fscache_n_stores_nobufs;
71324+atomic_unchecked_t fscache_n_stores_oom;
71325+atomic_unchecked_t fscache_n_store_ops;
71326+atomic_unchecked_t fscache_n_store_calls;
71327+atomic_unchecked_t fscache_n_store_pages;
71328+atomic_unchecked_t fscache_n_store_radix_deletes;
71329+atomic_unchecked_t fscache_n_store_pages_over_limit;
71330
71331-atomic_t fscache_n_store_vmscan_not_storing;
71332-atomic_t fscache_n_store_vmscan_gone;
71333-atomic_t fscache_n_store_vmscan_busy;
71334-atomic_t fscache_n_store_vmscan_cancelled;
71335-atomic_t fscache_n_store_vmscan_wait;
71336+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
71337+atomic_unchecked_t fscache_n_store_vmscan_gone;
71338+atomic_unchecked_t fscache_n_store_vmscan_busy;
71339+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
71340+atomic_unchecked_t fscache_n_store_vmscan_wait;
71341
71342-atomic_t fscache_n_marks;
71343-atomic_t fscache_n_uncaches;
71344+atomic_unchecked_t fscache_n_marks;
71345+atomic_unchecked_t fscache_n_uncaches;
71346
71347-atomic_t fscache_n_acquires;
71348-atomic_t fscache_n_acquires_null;
71349-atomic_t fscache_n_acquires_no_cache;
71350-atomic_t fscache_n_acquires_ok;
71351-atomic_t fscache_n_acquires_nobufs;
71352-atomic_t fscache_n_acquires_oom;
71353+atomic_unchecked_t fscache_n_acquires;
71354+atomic_unchecked_t fscache_n_acquires_null;
71355+atomic_unchecked_t fscache_n_acquires_no_cache;
71356+atomic_unchecked_t fscache_n_acquires_ok;
71357+atomic_unchecked_t fscache_n_acquires_nobufs;
71358+atomic_unchecked_t fscache_n_acquires_oom;
71359
71360-atomic_t fscache_n_invalidates;
71361-atomic_t fscache_n_invalidates_run;
71362+atomic_unchecked_t fscache_n_invalidates;
71363+atomic_unchecked_t fscache_n_invalidates_run;
71364
71365-atomic_t fscache_n_updates;
71366-atomic_t fscache_n_updates_null;
71367-atomic_t fscache_n_updates_run;
71368+atomic_unchecked_t fscache_n_updates;
71369+atomic_unchecked_t fscache_n_updates_null;
71370+atomic_unchecked_t fscache_n_updates_run;
71371
71372-atomic_t fscache_n_relinquishes;
71373-atomic_t fscache_n_relinquishes_null;
71374-atomic_t fscache_n_relinquishes_waitcrt;
71375-atomic_t fscache_n_relinquishes_retire;
71376+atomic_unchecked_t fscache_n_relinquishes;
71377+atomic_unchecked_t fscache_n_relinquishes_null;
71378+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
71379+atomic_unchecked_t fscache_n_relinquishes_retire;
71380
71381-atomic_t fscache_n_cookie_index;
71382-atomic_t fscache_n_cookie_data;
71383-atomic_t fscache_n_cookie_special;
71384+atomic_unchecked_t fscache_n_cookie_index;
71385+atomic_unchecked_t fscache_n_cookie_data;
71386+atomic_unchecked_t fscache_n_cookie_special;
71387
71388-atomic_t fscache_n_object_alloc;
71389-atomic_t fscache_n_object_no_alloc;
71390-atomic_t fscache_n_object_lookups;
71391-atomic_t fscache_n_object_lookups_negative;
71392-atomic_t fscache_n_object_lookups_positive;
71393-atomic_t fscache_n_object_lookups_timed_out;
71394-atomic_t fscache_n_object_created;
71395-atomic_t fscache_n_object_avail;
71396-atomic_t fscache_n_object_dead;
71397+atomic_unchecked_t fscache_n_object_alloc;
71398+atomic_unchecked_t fscache_n_object_no_alloc;
71399+atomic_unchecked_t fscache_n_object_lookups;
71400+atomic_unchecked_t fscache_n_object_lookups_negative;
71401+atomic_unchecked_t fscache_n_object_lookups_positive;
71402+atomic_unchecked_t fscache_n_object_lookups_timed_out;
71403+atomic_unchecked_t fscache_n_object_created;
71404+atomic_unchecked_t fscache_n_object_avail;
71405+atomic_unchecked_t fscache_n_object_dead;
71406
71407-atomic_t fscache_n_checkaux_none;
71408-atomic_t fscache_n_checkaux_okay;
71409-atomic_t fscache_n_checkaux_update;
71410-atomic_t fscache_n_checkaux_obsolete;
71411+atomic_unchecked_t fscache_n_checkaux_none;
71412+atomic_unchecked_t fscache_n_checkaux_okay;
71413+atomic_unchecked_t fscache_n_checkaux_update;
71414+atomic_unchecked_t fscache_n_checkaux_obsolete;
71415
71416 atomic_t fscache_n_cop_alloc_object;
71417 atomic_t fscache_n_cop_lookup_object;
71418@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
71419 seq_puts(m, "FS-Cache statistics\n");
71420
71421 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
71422- atomic_read(&fscache_n_cookie_index),
71423- atomic_read(&fscache_n_cookie_data),
71424- atomic_read(&fscache_n_cookie_special));
71425+ atomic_read_unchecked(&fscache_n_cookie_index),
71426+ atomic_read_unchecked(&fscache_n_cookie_data),
71427+ atomic_read_unchecked(&fscache_n_cookie_special));
71428
71429 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
71430- atomic_read(&fscache_n_object_alloc),
71431- atomic_read(&fscache_n_object_no_alloc),
71432- atomic_read(&fscache_n_object_avail),
71433- atomic_read(&fscache_n_object_dead));
71434+ atomic_read_unchecked(&fscache_n_object_alloc),
71435+ atomic_read_unchecked(&fscache_n_object_no_alloc),
71436+ atomic_read_unchecked(&fscache_n_object_avail),
71437+ atomic_read_unchecked(&fscache_n_object_dead));
71438 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
71439- atomic_read(&fscache_n_checkaux_none),
71440- atomic_read(&fscache_n_checkaux_okay),
71441- atomic_read(&fscache_n_checkaux_update),
71442- atomic_read(&fscache_n_checkaux_obsolete));
71443+ atomic_read_unchecked(&fscache_n_checkaux_none),
71444+ atomic_read_unchecked(&fscache_n_checkaux_okay),
71445+ atomic_read_unchecked(&fscache_n_checkaux_update),
71446+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
71447
71448 seq_printf(m, "Pages : mrk=%u unc=%u\n",
71449- atomic_read(&fscache_n_marks),
71450- atomic_read(&fscache_n_uncaches));
71451+ atomic_read_unchecked(&fscache_n_marks),
71452+ atomic_read_unchecked(&fscache_n_uncaches));
71453
71454 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
71455 " oom=%u\n",
71456- atomic_read(&fscache_n_acquires),
71457- atomic_read(&fscache_n_acquires_null),
71458- atomic_read(&fscache_n_acquires_no_cache),
71459- atomic_read(&fscache_n_acquires_ok),
71460- atomic_read(&fscache_n_acquires_nobufs),
71461- atomic_read(&fscache_n_acquires_oom));
71462+ atomic_read_unchecked(&fscache_n_acquires),
71463+ atomic_read_unchecked(&fscache_n_acquires_null),
71464+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
71465+ atomic_read_unchecked(&fscache_n_acquires_ok),
71466+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
71467+ atomic_read_unchecked(&fscache_n_acquires_oom));
71468
71469 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
71470- atomic_read(&fscache_n_object_lookups),
71471- atomic_read(&fscache_n_object_lookups_negative),
71472- atomic_read(&fscache_n_object_lookups_positive),
71473- atomic_read(&fscache_n_object_created),
71474- atomic_read(&fscache_n_object_lookups_timed_out));
71475+ atomic_read_unchecked(&fscache_n_object_lookups),
71476+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
71477+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
71478+ atomic_read_unchecked(&fscache_n_object_created),
71479+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
71480
71481 seq_printf(m, "Invals : n=%u run=%u\n",
71482- atomic_read(&fscache_n_invalidates),
71483- atomic_read(&fscache_n_invalidates_run));
71484+ atomic_read_unchecked(&fscache_n_invalidates),
71485+ atomic_read_unchecked(&fscache_n_invalidates_run));
71486
71487 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
71488- atomic_read(&fscache_n_updates),
71489- atomic_read(&fscache_n_updates_null),
71490- atomic_read(&fscache_n_updates_run));
71491+ atomic_read_unchecked(&fscache_n_updates),
71492+ atomic_read_unchecked(&fscache_n_updates_null),
71493+ atomic_read_unchecked(&fscache_n_updates_run));
71494
71495 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
71496- atomic_read(&fscache_n_relinquishes),
71497- atomic_read(&fscache_n_relinquishes_null),
71498- atomic_read(&fscache_n_relinquishes_waitcrt),
71499- atomic_read(&fscache_n_relinquishes_retire));
71500+ atomic_read_unchecked(&fscache_n_relinquishes),
71501+ atomic_read_unchecked(&fscache_n_relinquishes_null),
71502+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
71503+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
71504
71505 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
71506- atomic_read(&fscache_n_attr_changed),
71507- atomic_read(&fscache_n_attr_changed_ok),
71508- atomic_read(&fscache_n_attr_changed_nobufs),
71509- atomic_read(&fscache_n_attr_changed_nomem),
71510- atomic_read(&fscache_n_attr_changed_calls));
71511+ atomic_read_unchecked(&fscache_n_attr_changed),
71512+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
71513+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
71514+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
71515+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
71516
71517 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
71518- atomic_read(&fscache_n_allocs),
71519- atomic_read(&fscache_n_allocs_ok),
71520- atomic_read(&fscache_n_allocs_wait),
71521- atomic_read(&fscache_n_allocs_nobufs),
71522- atomic_read(&fscache_n_allocs_intr));
71523+ atomic_read_unchecked(&fscache_n_allocs),
71524+ atomic_read_unchecked(&fscache_n_allocs_ok),
71525+ atomic_read_unchecked(&fscache_n_allocs_wait),
71526+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
71527+ atomic_read_unchecked(&fscache_n_allocs_intr));
71528 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
71529- atomic_read(&fscache_n_alloc_ops),
71530- atomic_read(&fscache_n_alloc_op_waits),
71531- atomic_read(&fscache_n_allocs_object_dead));
71532+ atomic_read_unchecked(&fscache_n_alloc_ops),
71533+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
71534+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
71535
71536 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
71537 " int=%u oom=%u\n",
71538- atomic_read(&fscache_n_retrievals),
71539- atomic_read(&fscache_n_retrievals_ok),
71540- atomic_read(&fscache_n_retrievals_wait),
71541- atomic_read(&fscache_n_retrievals_nodata),
71542- atomic_read(&fscache_n_retrievals_nobufs),
71543- atomic_read(&fscache_n_retrievals_intr),
71544- atomic_read(&fscache_n_retrievals_nomem));
71545+ atomic_read_unchecked(&fscache_n_retrievals),
71546+ atomic_read_unchecked(&fscache_n_retrievals_ok),
71547+ atomic_read_unchecked(&fscache_n_retrievals_wait),
71548+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
71549+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
71550+ atomic_read_unchecked(&fscache_n_retrievals_intr),
71551+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
71552 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
71553- atomic_read(&fscache_n_retrieval_ops),
71554- atomic_read(&fscache_n_retrieval_op_waits),
71555- atomic_read(&fscache_n_retrievals_object_dead));
71556+ atomic_read_unchecked(&fscache_n_retrieval_ops),
71557+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
71558+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
71559
71560 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
71561- atomic_read(&fscache_n_stores),
71562- atomic_read(&fscache_n_stores_ok),
71563- atomic_read(&fscache_n_stores_again),
71564- atomic_read(&fscache_n_stores_nobufs),
71565- atomic_read(&fscache_n_stores_oom));
71566+ atomic_read_unchecked(&fscache_n_stores),
71567+ atomic_read_unchecked(&fscache_n_stores_ok),
71568+ atomic_read_unchecked(&fscache_n_stores_again),
71569+ atomic_read_unchecked(&fscache_n_stores_nobufs),
71570+ atomic_read_unchecked(&fscache_n_stores_oom));
71571 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
71572- atomic_read(&fscache_n_store_ops),
71573- atomic_read(&fscache_n_store_calls),
71574- atomic_read(&fscache_n_store_pages),
71575- atomic_read(&fscache_n_store_radix_deletes),
71576- atomic_read(&fscache_n_store_pages_over_limit));
71577+ atomic_read_unchecked(&fscache_n_store_ops),
71578+ atomic_read_unchecked(&fscache_n_store_calls),
71579+ atomic_read_unchecked(&fscache_n_store_pages),
71580+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
71581+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
71582
71583 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
71584- atomic_read(&fscache_n_store_vmscan_not_storing),
71585- atomic_read(&fscache_n_store_vmscan_gone),
71586- atomic_read(&fscache_n_store_vmscan_busy),
71587- atomic_read(&fscache_n_store_vmscan_cancelled),
71588- atomic_read(&fscache_n_store_vmscan_wait));
71589+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
71590+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
71591+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
71592+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
71593+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
71594
71595 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
71596- atomic_read(&fscache_n_op_pend),
71597- atomic_read(&fscache_n_op_run),
71598- atomic_read(&fscache_n_op_enqueue),
71599- atomic_read(&fscache_n_op_cancelled),
71600- atomic_read(&fscache_n_op_rejected));
71601+ atomic_read_unchecked(&fscache_n_op_pend),
71602+ atomic_read_unchecked(&fscache_n_op_run),
71603+ atomic_read_unchecked(&fscache_n_op_enqueue),
71604+ atomic_read_unchecked(&fscache_n_op_cancelled),
71605+ atomic_read_unchecked(&fscache_n_op_rejected));
71606 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
71607- atomic_read(&fscache_n_op_deferred_release),
71608- atomic_read(&fscache_n_op_release),
71609- atomic_read(&fscache_n_op_gc));
71610+ atomic_read_unchecked(&fscache_n_op_deferred_release),
71611+ atomic_read_unchecked(&fscache_n_op_release),
71612+ atomic_read_unchecked(&fscache_n_op_gc));
71613
71614 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
71615 atomic_read(&fscache_n_cop_alloc_object),
71616diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
71617index 28d0c7a..04816b7 100644
71618--- a/fs/fuse/cuse.c
71619+++ b/fs/fuse/cuse.c
71620@@ -611,10 +611,12 @@ static int __init cuse_init(void)
71621 INIT_LIST_HEAD(&cuse_conntbl[i]);
71622
71623 /* inherit and extend fuse_dev_operations */
71624- cuse_channel_fops = fuse_dev_operations;
71625- cuse_channel_fops.owner = THIS_MODULE;
71626- cuse_channel_fops.open = cuse_channel_open;
71627- cuse_channel_fops.release = cuse_channel_release;
71628+ pax_open_kernel();
71629+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
71630+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
71631+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
71632+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
71633+ pax_close_kernel();
71634
71635 cuse_class = class_create(THIS_MODULE, "cuse");
71636 if (IS_ERR(cuse_class))
71637diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
71638index 39706c5..a803c71 100644
71639--- a/fs/fuse/dev.c
71640+++ b/fs/fuse/dev.c
71641@@ -1405,7 +1405,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
71642 ret = 0;
71643 pipe_lock(pipe);
71644
71645- if (!pipe->readers) {
71646+ if (!atomic_read(&pipe->readers)) {
71647 send_sig(SIGPIPE, current, 0);
71648 if (!ret)
71649 ret = -EPIPE;
71650@@ -1434,7 +1434,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
71651 page_nr++;
71652 ret += buf->len;
71653
71654- if (pipe->files)
71655+ if (atomic_read(&pipe->files))
71656 do_wakeup = 1;
71657 }
71658
71659diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
71660index 1545b71..7fabe47 100644
71661--- a/fs/fuse/dir.c
71662+++ b/fs/fuse/dir.c
71663@@ -1394,7 +1394,7 @@ static char *read_link(struct dentry *dentry)
71664 return link;
71665 }
71666
71667-static void free_link(char *link)
71668+static void free_link(const char *link)
71669 {
71670 if (!IS_ERR(link))
71671 free_page((unsigned long) link);
71672diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
71673index f42dffb..4a4c435 100644
71674--- a/fs/gfs2/glock.c
71675+++ b/fs/gfs2/glock.c
71676@@ -385,9 +385,9 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
71677 if (held1 != held2) {
71678 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
71679 if (held2)
71680- gl->gl_lockref.count++;
71681+ __lockref_inc(&gl->gl_lockref);
71682 else
71683- gl->gl_lockref.count--;
71684+ __lockref_dec(&gl->gl_lockref);
71685 }
71686 if (held1 && held2 && list_empty(&gl->gl_holders))
71687 clear_bit(GLF_QUEUED, &gl->gl_flags);
71688@@ -614,9 +614,9 @@ out:
71689 out_sched:
71690 clear_bit(GLF_LOCK, &gl->gl_flags);
71691 smp_mb__after_atomic();
71692- gl->gl_lockref.count++;
71693+ __lockref_inc(&gl->gl_lockref);
71694 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
71695- gl->gl_lockref.count--;
71696+ __lockref_dec(&gl->gl_lockref);
71697 return;
71698
71699 out_unlock:
71700@@ -742,7 +742,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
71701 gl->gl_sbd = sdp;
71702 gl->gl_flags = 0;
71703 gl->gl_name = name;
71704- gl->gl_lockref.count = 1;
71705+ __lockref_set(&gl->gl_lockref, 1);
71706 gl->gl_state = LM_ST_UNLOCKED;
71707 gl->gl_target = LM_ST_UNLOCKED;
71708 gl->gl_demote_state = LM_ST_EXCLUSIVE;
71709@@ -1020,9 +1020,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
71710 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
71711 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
71712 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
71713- gl->gl_lockref.count++;
71714+ __lockref_inc(&gl->gl_lockref);
71715 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
71716- gl->gl_lockref.count--;
71717+ __lockref_dec(&gl->gl_lockref);
71718 }
71719 run_queue(gl, 1);
71720 spin_unlock(&gl->gl_spin);
71721@@ -1325,7 +1325,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
71722 }
71723 }
71724
71725- gl->gl_lockref.count++;
71726+ __lockref_inc(&gl->gl_lockref);
71727 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
71728 spin_unlock(&gl->gl_spin);
71729
71730@@ -1384,12 +1384,12 @@ add_back_to_lru:
71731 goto add_back_to_lru;
71732 }
71733 clear_bit(GLF_LRU, &gl->gl_flags);
71734- gl->gl_lockref.count++;
71735+ __lockref_inc(&gl->gl_lockref);
71736 if (demote_ok(gl))
71737 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
71738 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
71739 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
71740- gl->gl_lockref.count--;
71741+ __lockref_dec(&gl->gl_lockref);
71742 spin_unlock(&gl->gl_spin);
71743 cond_resched_lock(&lru_lock);
71744 }
71745@@ -1719,7 +1719,7 @@ void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
71746 state2str(gl->gl_demote_state), dtime,
71747 atomic_read(&gl->gl_ail_count),
71748 atomic_read(&gl->gl_revokes),
71749- (int)gl->gl_lockref.count, gl->gl_hold_time);
71750+ __lockref_read(&gl->gl_lockref), gl->gl_hold_time);
71751
71752 list_for_each_entry(gh, &gl->gl_holders, gh_list)
71753 dump_holder(seq, gh);
71754diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
71755index fe91951..ce38a6e 100644
71756--- a/fs/gfs2/glops.c
71757+++ b/fs/gfs2/glops.c
71758@@ -544,9 +544,9 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
71759
71760 if (gl->gl_demote_state == LM_ST_UNLOCKED &&
71761 gl->gl_state == LM_ST_SHARED && ip) {
71762- gl->gl_lockref.count++;
71763+ __lockref_inc(&gl->gl_lockref);
71764 if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
71765- gl->gl_lockref.count--;
71766+ __lockref_dec(&gl->gl_lockref);
71767 }
71768 }
71769
71770diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
71771index 3aa17d4..b338075 100644
71772--- a/fs/gfs2/quota.c
71773+++ b/fs/gfs2/quota.c
71774@@ -154,7 +154,7 @@ static enum lru_status gfs2_qd_isolate(struct list_head *item,
71775 if (!spin_trylock(&qd->qd_lockref.lock))
71776 return LRU_SKIP;
71777
71778- if (qd->qd_lockref.count == 0) {
71779+ if (__lockref_read(&qd->qd_lockref) == 0) {
71780 lockref_mark_dead(&qd->qd_lockref);
71781 list_lru_isolate_move(lru, &qd->qd_lru, dispose);
71782 }
71783@@ -221,7 +221,7 @@ static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, str
71784 return NULL;
71785
71786 qd->qd_sbd = sdp;
71787- qd->qd_lockref.count = 1;
71788+ __lockref_set(&qd->qd_lockref, 1);
71789 spin_lock_init(&qd->qd_lockref.lock);
71790 qd->qd_id = qid;
71791 qd->qd_slot = -1;
71792@@ -312,7 +312,7 @@ static void qd_put(struct gfs2_quota_data *qd)
71793 if (lockref_put_or_lock(&qd->qd_lockref))
71794 return;
71795
71796- qd->qd_lockref.count = 0;
71797+ __lockref_set(&qd->qd_lockref, 0);
71798 list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
71799 spin_unlock(&qd->qd_lockref.lock);
71800
71801diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
71802index fd62cae..3494dfa 100644
71803--- a/fs/hostfs/hostfs_kern.c
71804+++ b/fs/hostfs/hostfs_kern.c
71805@@ -908,7 +908,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
71806
71807 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
71808 {
71809- char *s = nd_get_link(nd);
71810+ const char *s = nd_get_link(nd);
71811 if (!IS_ERR(s))
71812 __putname(s);
71813 }
71814diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
71815index c274aca..772fa5e 100644
71816--- a/fs/hugetlbfs/inode.c
71817+++ b/fs/hugetlbfs/inode.c
71818@@ -148,6 +148,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
71819 struct mm_struct *mm = current->mm;
71820 struct vm_area_struct *vma;
71821 struct hstate *h = hstate_file(file);
71822+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
71823 struct vm_unmapped_area_info info;
71824
71825 if (len & ~huge_page_mask(h))
71826@@ -161,17 +162,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
71827 return addr;
71828 }
71829
71830+#ifdef CONFIG_PAX_RANDMMAP
71831+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71832+#endif
71833+
71834 if (addr) {
71835 addr = ALIGN(addr, huge_page_size(h));
71836 vma = find_vma(mm, addr);
71837- if (TASK_SIZE - len >= addr &&
71838- (!vma || addr + len <= vma->vm_start))
71839+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
71840 return addr;
71841 }
71842
71843 info.flags = 0;
71844 info.length = len;
71845 info.low_limit = TASK_UNMAPPED_BASE;
71846+
71847+#ifdef CONFIG_PAX_RANDMMAP
71848+ if (mm->pax_flags & MF_PAX_RANDMMAP)
71849+ info.low_limit += mm->delta_mmap;
71850+#endif
71851+
71852 info.high_limit = TASK_SIZE;
71853 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
71854 info.align_offset = 0;
71855@@ -912,7 +922,7 @@ static struct file_system_type hugetlbfs_fs_type = {
71856 };
71857 MODULE_ALIAS_FS("hugetlbfs");
71858
71859-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
71860+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
71861
71862 static int can_do_hugetlb_shm(void)
71863 {
71864diff --git a/fs/inode.c b/fs/inode.c
71865index c60671d..9c2eb5f 100644
71866--- a/fs/inode.c
71867+++ b/fs/inode.c
71868@@ -830,16 +830,20 @@ unsigned int get_next_ino(void)
71869 unsigned int *p = &get_cpu_var(last_ino);
71870 unsigned int res = *p;
71871
71872+start:
71873+
71874 #ifdef CONFIG_SMP
71875 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
71876- static atomic_t shared_last_ino;
71877- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
71878+ static atomic_unchecked_t shared_last_ino;
71879+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
71880
71881 res = next - LAST_INO_BATCH;
71882 }
71883 #endif
71884
71885- *p = ++res;
71886+ if (unlikely(!++res))
71887+ goto start; /* never zero */
71888+ *p = res;
71889 put_cpu_var(last_ino);
71890 return res;
71891 }
71892diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
71893index 4a6cf28..d3a29d3 100644
71894--- a/fs/jffs2/erase.c
71895+++ b/fs/jffs2/erase.c
71896@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
71897 struct jffs2_unknown_node marker = {
71898 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
71899 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
71900- .totlen = cpu_to_je32(c->cleanmarker_size)
71901+ .totlen = cpu_to_je32(c->cleanmarker_size),
71902+ .hdr_crc = cpu_to_je32(0)
71903 };
71904
71905 jffs2_prealloc_raw_node_refs(c, jeb, 1);
71906diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
71907index 09ed551..45684f8 100644
71908--- a/fs/jffs2/wbuf.c
71909+++ b/fs/jffs2/wbuf.c
71910@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
71911 {
71912 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
71913 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
71914- .totlen = constant_cpu_to_je32(8)
71915+ .totlen = constant_cpu_to_je32(8),
71916+ .hdr_crc = constant_cpu_to_je32(0)
71917 };
71918
71919 /*
71920diff --git a/fs/jfs/super.c b/fs/jfs/super.c
71921index 5d30c56..8c45372 100644
71922--- a/fs/jfs/super.c
71923+++ b/fs/jfs/super.c
71924@@ -901,7 +901,7 @@ static int __init init_jfs_fs(void)
71925
71926 jfs_inode_cachep =
71927 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
71928- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
71929+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
71930 init_once);
71931 if (jfs_inode_cachep == NULL)
71932 return -ENOMEM;
71933diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
71934index 345b35f..da7e0da 100644
71935--- a/fs/kernfs/dir.c
71936+++ b/fs/kernfs/dir.c
71937@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
71938 *
71939 * Returns 31 bit hash of ns + name (so it fits in an off_t )
71940 */
71941-static unsigned int kernfs_name_hash(const char *name, const void *ns)
71942+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
71943 {
71944 unsigned long hash = init_name_hash();
71945 unsigned int len = strlen(name);
71946@@ -838,6 +838,12 @@ static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
71947 ret = scops->mkdir(parent, dentry->d_name.name, mode);
71948
71949 kernfs_put_active(parent);
71950+
71951+ if (!ret) {
71952+ struct dentry *dentry_ret = kernfs_iop_lookup(dir, dentry, 0);
71953+ ret = PTR_ERR_OR_ZERO(dentry_ret);
71954+ }
71955+
71956 return ret;
71957 }
71958
71959diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
71960index 2bacb99..f745182 100644
71961--- a/fs/kernfs/file.c
71962+++ b/fs/kernfs/file.c
71963@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
71964
71965 struct kernfs_open_node {
71966 atomic_t refcnt;
71967- atomic_t event;
71968+ atomic_unchecked_t event;
71969 wait_queue_head_t poll;
71970 struct list_head files; /* goes through kernfs_open_file.list */
71971 };
71972@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
71973 {
71974 struct kernfs_open_file *of = sf->private;
71975
71976- of->event = atomic_read(&of->kn->attr.open->event);
71977+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
71978
71979 return of->kn->attr.ops->seq_show(sf, v);
71980 }
71981@@ -207,7 +207,7 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
71982 goto out_free;
71983 }
71984
71985- of->event = atomic_read(&of->kn->attr.open->event);
71986+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
71987 ops = kernfs_ops(of->kn);
71988 if (ops->read)
71989 len = ops->read(of, buf, len, *ppos);
71990@@ -272,7 +272,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
71991 {
71992 struct kernfs_open_file *of = kernfs_of(file);
71993 const struct kernfs_ops *ops;
71994- size_t len;
71995+ ssize_t len;
71996 char *buf;
71997
71998 if (of->atomic_write_len) {
71999@@ -385,12 +385,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
72000 return ret;
72001 }
72002
72003-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
72004- void *buf, int len, int write)
72005+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
72006+ void *buf, size_t len, int write)
72007 {
72008 struct file *file = vma->vm_file;
72009 struct kernfs_open_file *of = kernfs_of(file);
72010- int ret;
72011+ ssize_t ret;
72012
72013 if (!of->vm_ops)
72014 return -EINVAL;
72015@@ -569,7 +569,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
72016 return -ENOMEM;
72017
72018 atomic_set(&new_on->refcnt, 0);
72019- atomic_set(&new_on->event, 1);
72020+ atomic_set_unchecked(&new_on->event, 1);
72021 init_waitqueue_head(&new_on->poll);
72022 INIT_LIST_HEAD(&new_on->files);
72023 goto retry;
72024@@ -793,7 +793,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
72025
72026 kernfs_put_active(kn);
72027
72028- if (of->event != atomic_read(&on->event))
72029+ if (of->event != atomic_read_unchecked(&on->event))
72030 goto trigger;
72031
72032 return DEFAULT_POLLMASK;
72033@@ -824,7 +824,7 @@ repeat:
72034
72035 on = kn->attr.open;
72036 if (on) {
72037- atomic_inc(&on->event);
72038+ atomic_inc_unchecked(&on->event);
72039 wake_up_interruptible(&on->poll);
72040 }
72041
72042diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
72043index 8a19889..4c3069a 100644
72044--- a/fs/kernfs/symlink.c
72045+++ b/fs/kernfs/symlink.c
72046@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
72047 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
72048 void *cookie)
72049 {
72050- char *page = nd_get_link(nd);
72051+ const char *page = nd_get_link(nd);
72052 if (!IS_ERR(page))
72053 free_page((unsigned long)page);
72054 }
72055diff --git a/fs/libfs.c b/fs/libfs.c
72056index 0ab6512..cd9982d 100644
72057--- a/fs/libfs.c
72058+++ b/fs/libfs.c
72059@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
72060
72061 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
72062 struct dentry *next = list_entry(p, struct dentry, d_child);
72063+ char d_name[sizeof(next->d_iname)];
72064+ const unsigned char *name;
72065+
72066 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
72067 if (!simple_positive(next)) {
72068 spin_unlock(&next->d_lock);
72069@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
72070
72071 spin_unlock(&next->d_lock);
72072 spin_unlock(&dentry->d_lock);
72073- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
72074+ name = next->d_name.name;
72075+ if (name == next->d_iname) {
72076+ memcpy(d_name, name, next->d_name.len);
72077+ name = d_name;
72078+ }
72079+ if (!dir_emit(ctx, name, next->d_name.len,
72080 next->d_inode->i_ino, dt_type(next->d_inode)))
72081 return 0;
72082 spin_lock(&dentry->d_lock);
72083@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
72084 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
72085 void *cookie)
72086 {
72087- char *s = nd_get_link(nd);
72088+ const char *s = nd_get_link(nd);
72089 if (!IS_ERR(s))
72090 kfree(s);
72091 }
72092diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
72093index acd3947..1f896e2 100644
72094--- a/fs/lockd/clntproc.c
72095+++ b/fs/lockd/clntproc.c
72096@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
72097 /*
72098 * Cookie counter for NLM requests
72099 */
72100-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
72101+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
72102
72103 void nlmclnt_next_cookie(struct nlm_cookie *c)
72104 {
72105- u32 cookie = atomic_inc_return(&nlm_cookie);
72106+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
72107
72108 memcpy(c->data, &cookie, 4);
72109 c->len=4;
72110diff --git a/fs/mount.h b/fs/mount.h
72111index 6a61c2b..bd79179 100644
72112--- a/fs/mount.h
72113+++ b/fs/mount.h
72114@@ -13,7 +13,7 @@ struct mnt_namespace {
72115 u64 seq; /* Sequence number to prevent loops */
72116 wait_queue_head_t poll;
72117 u64 event;
72118-};
72119+} __randomize_layout;
72120
72121 struct mnt_pcp {
72122 int mnt_count;
72123@@ -65,7 +65,7 @@ struct mount {
72124 struct hlist_head mnt_pins;
72125 struct fs_pin mnt_umount;
72126 struct dentry *mnt_ex_mountpoint;
72127-};
72128+} __randomize_layout;
72129
72130 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
72131
72132diff --git a/fs/namei.c b/fs/namei.c
72133index 50a8583..44c470a 100644
72134--- a/fs/namei.c
72135+++ b/fs/namei.c
72136@@ -337,17 +337,32 @@ int generic_permission(struct inode *inode, int mask)
72137 if (ret != -EACCES)
72138 return ret;
72139
72140+#ifdef CONFIG_GRKERNSEC
72141+ /* we'll block if we have to log due to a denied capability use */
72142+ if (mask & MAY_NOT_BLOCK)
72143+ return -ECHILD;
72144+#endif
72145+
72146 if (S_ISDIR(inode->i_mode)) {
72147 /* DACs are overridable for directories */
72148- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
72149- return 0;
72150 if (!(mask & MAY_WRITE))
72151- if (capable_wrt_inode_uidgid(inode,
72152- CAP_DAC_READ_SEARCH))
72153+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
72154+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
72155 return 0;
72156+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
72157+ return 0;
72158 return -EACCES;
72159 }
72160 /*
72161+ * Searching includes executable on directories, else just read.
72162+ */
72163+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
72164+ if (mask == MAY_READ)
72165+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
72166+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
72167+ return 0;
72168+
72169+ /*
72170 * Read/write DACs are always overridable.
72171 * Executable DACs are overridable when there is
72172 * at least one exec bit set.
72173@@ -356,14 +371,6 @@ int generic_permission(struct inode *inode, int mask)
72174 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
72175 return 0;
72176
72177- /*
72178- * Searching includes executable on directories, else just read.
72179- */
72180- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
72181- if (mask == MAY_READ)
72182- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
72183- return 0;
72184-
72185 return -EACCES;
72186 }
72187 EXPORT_SYMBOL(generic_permission);
72188@@ -503,7 +510,7 @@ struct nameidata {
72189 int last_type;
72190 unsigned depth;
72191 struct file *base;
72192- char *saved_names[MAX_NESTED_LINKS + 1];
72193+ const char *saved_names[MAX_NESTED_LINKS + 1];
72194 };
72195
72196 /*
72197@@ -714,13 +721,13 @@ void nd_jump_link(struct nameidata *nd, struct path *path)
72198 nd->flags |= LOOKUP_JUMPED;
72199 }
72200
72201-void nd_set_link(struct nameidata *nd, char *path)
72202+void nd_set_link(struct nameidata *nd, const char *path)
72203 {
72204 nd->saved_names[nd->depth] = path;
72205 }
72206 EXPORT_SYMBOL(nd_set_link);
72207
72208-char *nd_get_link(struct nameidata *nd)
72209+const char *nd_get_link(const struct nameidata *nd)
72210 {
72211 return nd->saved_names[nd->depth];
72212 }
72213@@ -855,7 +862,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
72214 {
72215 struct dentry *dentry = link->dentry;
72216 int error;
72217- char *s;
72218+ const char *s;
72219
72220 BUG_ON(nd->flags & LOOKUP_RCU);
72221
72222@@ -876,6 +883,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
72223 if (error)
72224 goto out_put_nd_path;
72225
72226+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
72227+ dentry->d_inode, dentry, nd->path.mnt)) {
72228+ error = -EACCES;
72229+ goto out_put_nd_path;
72230+ }
72231+
72232 nd->last_type = LAST_BIND;
72233 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
72234 error = PTR_ERR(*p);
72235@@ -1640,6 +1653,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
72236 if (res)
72237 break;
72238 res = walk_component(nd, path, LOOKUP_FOLLOW);
72239+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
72240+ res = -EACCES;
72241 put_link(nd, &link, cookie);
72242 } while (res > 0);
72243
72244@@ -1712,7 +1727,7 @@ EXPORT_SYMBOL(full_name_hash);
72245 static inline u64 hash_name(const char *name)
72246 {
72247 unsigned long a, b, adata, bdata, mask, hash, len;
72248- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
72249+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
72250
72251 hash = a = 0;
72252 len = -sizeof(unsigned long);
72253@@ -2007,6 +2022,8 @@ static int path_lookupat(int dfd, const char *name,
72254 if (err)
72255 break;
72256 err = lookup_last(nd, &path);
72257+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
72258+ err = -EACCES;
72259 put_link(nd, &link, cookie);
72260 }
72261 }
72262@@ -2014,6 +2031,13 @@ static int path_lookupat(int dfd, const char *name,
72263 if (!err)
72264 err = complete_walk(nd);
72265
72266+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
72267+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
72268+ path_put(&nd->path);
72269+ err = -ENOENT;
72270+ }
72271+ }
72272+
72273 if (!err && nd->flags & LOOKUP_DIRECTORY) {
72274 if (!d_can_lookup(nd->path.dentry)) {
72275 path_put(&nd->path);
72276@@ -2035,8 +2059,15 @@ static int filename_lookup(int dfd, struct filename *name,
72277 retval = path_lookupat(dfd, name->name,
72278 flags | LOOKUP_REVAL, nd);
72279
72280- if (likely(!retval))
72281+ if (likely(!retval)) {
72282 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
72283+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
72284+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
72285+ path_put(&nd->path);
72286+ return -ENOENT;
72287+ }
72288+ }
72289+ }
72290 return retval;
72291 }
72292
72293@@ -2615,6 +2646,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
72294 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
72295 return -EPERM;
72296
72297+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
72298+ return -EPERM;
72299+ if (gr_handle_rawio(inode))
72300+ return -EPERM;
72301+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
72302+ return -EACCES;
72303+
72304 return 0;
72305 }
72306
72307@@ -2846,7 +2884,7 @@ looked_up:
72308 * cleared otherwise prior to returning.
72309 */
72310 static int lookup_open(struct nameidata *nd, struct path *path,
72311- struct file *file,
72312+ struct path *link, struct file *file,
72313 const struct open_flags *op,
72314 bool got_write, int *opened)
72315 {
72316@@ -2881,6 +2919,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
72317 /* Negative dentry, just create the file */
72318 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
72319 umode_t mode = op->mode;
72320+
72321+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
72322+ error = -EACCES;
72323+ goto out_dput;
72324+ }
72325+
72326+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
72327+ error = -EACCES;
72328+ goto out_dput;
72329+ }
72330+
72331 if (!IS_POSIXACL(dir->d_inode))
72332 mode &= ~current_umask();
72333 /*
72334@@ -2902,6 +2951,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
72335 nd->flags & LOOKUP_EXCL);
72336 if (error)
72337 goto out_dput;
72338+ else
72339+ gr_handle_create(dentry, nd->path.mnt);
72340 }
72341 out_no_open:
72342 path->dentry = dentry;
72343@@ -2916,7 +2967,7 @@ out_dput:
72344 /*
72345 * Handle the last step of open()
72346 */
72347-static int do_last(struct nameidata *nd, struct path *path,
72348+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
72349 struct file *file, const struct open_flags *op,
72350 int *opened, struct filename *name)
72351 {
72352@@ -2966,6 +3017,15 @@ static int do_last(struct nameidata *nd, struct path *path,
72353 if (error)
72354 return error;
72355
72356+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
72357+ error = -ENOENT;
72358+ goto out;
72359+ }
72360+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
72361+ error = -EACCES;
72362+ goto out;
72363+ }
72364+
72365 audit_inode(name, dir, LOOKUP_PARENT);
72366 error = -EISDIR;
72367 /* trailing slashes? */
72368@@ -2985,7 +3045,7 @@ retry_lookup:
72369 */
72370 }
72371 mutex_lock(&dir->d_inode->i_mutex);
72372- error = lookup_open(nd, path, file, op, got_write, opened);
72373+ error = lookup_open(nd, path, link, file, op, got_write, opened);
72374 mutex_unlock(&dir->d_inode->i_mutex);
72375
72376 if (error <= 0) {
72377@@ -3009,11 +3069,28 @@ retry_lookup:
72378 goto finish_open_created;
72379 }
72380
72381+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
72382+ error = -ENOENT;
72383+ goto exit_dput;
72384+ }
72385+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
72386+ error = -EACCES;
72387+ goto exit_dput;
72388+ }
72389+
72390 /*
72391 * create/update audit record if it already exists.
72392 */
72393- if (d_is_positive(path->dentry))
72394+ if (d_is_positive(path->dentry)) {
72395+ /* only check if O_CREAT is specified, all other checks need to go
72396+ into may_open */
72397+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
72398+ error = -EACCES;
72399+ goto exit_dput;
72400+ }
72401+
72402 audit_inode(name, path->dentry, 0);
72403+ }
72404
72405 /*
72406 * If atomic_open() acquired write access it is dropped now due to
72407@@ -3055,6 +3132,11 @@ finish_lookup:
72408 }
72409 }
72410 BUG_ON(inode != path->dentry->d_inode);
72411+ /* if we're resolving a symlink to another symlink */
72412+ if (link && gr_handle_symlink_owner(link, inode)) {
72413+ error = -EACCES;
72414+ goto out;
72415+ }
72416 return 1;
72417 }
72418
72419@@ -3074,7 +3156,18 @@ finish_open:
72420 path_put(&save_parent);
72421 return error;
72422 }
72423+
72424+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
72425+ error = -ENOENT;
72426+ goto out;
72427+ }
72428+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
72429+ error = -EACCES;
72430+ goto out;
72431+ }
72432+
72433 audit_inode(name, nd->path.dentry, 0);
72434+
72435 error = -EISDIR;
72436 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
72437 goto out;
72438@@ -3235,7 +3328,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
72439 if (unlikely(error))
72440 goto out;
72441
72442- error = do_last(nd, &path, file, op, &opened, pathname);
72443+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
72444 while (unlikely(error > 0)) { /* trailing symlink */
72445 struct path link = path;
72446 void *cookie;
72447@@ -3253,7 +3346,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
72448 error = follow_link(&link, nd, &cookie);
72449 if (unlikely(error))
72450 break;
72451- error = do_last(nd, &path, file, op, &opened, pathname);
72452+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
72453 put_link(nd, &link, cookie);
72454 }
72455 out:
72456@@ -3356,9 +3449,11 @@ static struct dentry *filename_create(int dfd, struct filename *name,
72457 goto unlock;
72458
72459 error = -EEXIST;
72460- if (d_is_positive(dentry))
72461+ if (d_is_positive(dentry)) {
72462+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
72463+ error = -ENOENT;
72464 goto fail;
72465-
72466+ }
72467 /*
72468 * Special case - lookup gave negative, but... we had foo/bar/
72469 * From the vfs_mknod() POV we just have a negative dentry -
72470@@ -3423,6 +3518,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
72471 }
72472 EXPORT_SYMBOL(user_path_create);
72473
72474+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
72475+{
72476+ struct filename *tmp = getname(pathname);
72477+ struct dentry *res;
72478+ if (IS_ERR(tmp))
72479+ return ERR_CAST(tmp);
72480+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
72481+ if (IS_ERR(res))
72482+ putname(tmp);
72483+ else
72484+ *to = tmp;
72485+ return res;
72486+}
72487+
72488 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
72489 {
72490 int error = may_create(dir, dentry);
72491@@ -3486,6 +3595,17 @@ retry:
72492
72493 if (!IS_POSIXACL(path.dentry->d_inode))
72494 mode &= ~current_umask();
72495+
72496+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
72497+ error = -EPERM;
72498+ goto out;
72499+ }
72500+
72501+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
72502+ error = -EACCES;
72503+ goto out;
72504+ }
72505+
72506 error = security_path_mknod(&path, dentry, mode, dev);
72507 if (error)
72508 goto out;
72509@@ -3501,6 +3621,8 @@ retry:
72510 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
72511 break;
72512 }
72513+ if (!error)
72514+ gr_handle_create(dentry, path.mnt);
72515 out:
72516 done_path_create(&path, dentry);
72517 if (retry_estale(error, lookup_flags)) {
72518@@ -3555,9 +3677,16 @@ retry:
72519
72520 if (!IS_POSIXACL(path.dentry->d_inode))
72521 mode &= ~current_umask();
72522+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
72523+ error = -EACCES;
72524+ goto out;
72525+ }
72526 error = security_path_mkdir(&path, dentry, mode);
72527 if (!error)
72528 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
72529+ if (!error)
72530+ gr_handle_create(dentry, path.mnt);
72531+out:
72532 done_path_create(&path, dentry);
72533 if (retry_estale(error, lookup_flags)) {
72534 lookup_flags |= LOOKUP_REVAL;
72535@@ -3590,7 +3719,7 @@ void dentry_unhash(struct dentry *dentry)
72536 {
72537 shrink_dcache_parent(dentry);
72538 spin_lock(&dentry->d_lock);
72539- if (dentry->d_lockref.count == 1)
72540+ if (__lockref_read(&dentry->d_lockref) == 1)
72541 __d_drop(dentry);
72542 spin_unlock(&dentry->d_lock);
72543 }
72544@@ -3641,6 +3770,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
72545 struct filename *name;
72546 struct dentry *dentry;
72547 struct nameidata nd;
72548+ u64 saved_ino = 0;
72549+ dev_t saved_dev = 0;
72550 unsigned int lookup_flags = 0;
72551 retry:
72552 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
72553@@ -3673,10 +3804,21 @@ retry:
72554 error = -ENOENT;
72555 goto exit3;
72556 }
72557+
72558+ saved_ino = gr_get_ino_from_dentry(dentry);
72559+ saved_dev = gr_get_dev_from_dentry(dentry);
72560+
72561+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
72562+ error = -EACCES;
72563+ goto exit3;
72564+ }
72565+
72566 error = security_path_rmdir(&nd.path, dentry);
72567 if (error)
72568 goto exit3;
72569 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
72570+ if (!error && (saved_dev || saved_ino))
72571+ gr_handle_delete(saved_ino, saved_dev);
72572 exit3:
72573 dput(dentry);
72574 exit2:
72575@@ -3769,6 +3911,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
72576 struct nameidata nd;
72577 struct inode *inode = NULL;
72578 struct inode *delegated_inode = NULL;
72579+ u64 saved_ino = 0;
72580+ dev_t saved_dev = 0;
72581 unsigned int lookup_flags = 0;
72582 retry:
72583 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
72584@@ -3795,10 +3939,22 @@ retry_deleg:
72585 if (d_is_negative(dentry))
72586 goto slashes;
72587 ihold(inode);
72588+
72589+ if (inode->i_nlink <= 1) {
72590+ saved_ino = gr_get_ino_from_dentry(dentry);
72591+ saved_dev = gr_get_dev_from_dentry(dentry);
72592+ }
72593+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
72594+ error = -EACCES;
72595+ goto exit2;
72596+ }
72597+
72598 error = security_path_unlink(&nd.path, dentry);
72599 if (error)
72600 goto exit2;
72601 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
72602+ if (!error && (saved_ino || saved_dev))
72603+ gr_handle_delete(saved_ino, saved_dev);
72604 exit2:
72605 dput(dentry);
72606 }
72607@@ -3887,9 +4043,17 @@ retry:
72608 if (IS_ERR(dentry))
72609 goto out_putname;
72610
72611+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
72612+ error = -EACCES;
72613+ goto out;
72614+ }
72615+
72616 error = security_path_symlink(&path, dentry, from->name);
72617 if (!error)
72618 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
72619+ if (!error)
72620+ gr_handle_create(dentry, path.mnt);
72621+out:
72622 done_path_create(&path, dentry);
72623 if (retry_estale(error, lookup_flags)) {
72624 lookup_flags |= LOOKUP_REVAL;
72625@@ -3993,6 +4157,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
72626 struct dentry *new_dentry;
72627 struct path old_path, new_path;
72628 struct inode *delegated_inode = NULL;
72629+ struct filename *to = NULL;
72630 int how = 0;
72631 int error;
72632
72633@@ -4016,7 +4181,7 @@ retry:
72634 if (error)
72635 return error;
72636
72637- new_dentry = user_path_create(newdfd, newname, &new_path,
72638+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
72639 (how & LOOKUP_REVAL));
72640 error = PTR_ERR(new_dentry);
72641 if (IS_ERR(new_dentry))
72642@@ -4028,11 +4193,28 @@ retry:
72643 error = may_linkat(&old_path);
72644 if (unlikely(error))
72645 goto out_dput;
72646+
72647+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
72648+ old_path.dentry->d_inode,
72649+ old_path.dentry->d_inode->i_mode, to)) {
72650+ error = -EACCES;
72651+ goto out_dput;
72652+ }
72653+
72654+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
72655+ old_path.dentry, old_path.mnt, to)) {
72656+ error = -EACCES;
72657+ goto out_dput;
72658+ }
72659+
72660 error = security_path_link(old_path.dentry, &new_path, new_dentry);
72661 if (error)
72662 goto out_dput;
72663 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
72664+ if (!error)
72665+ gr_handle_create(new_dentry, new_path.mnt);
72666 out_dput:
72667+ putname(to);
72668 done_path_create(&new_path, new_dentry);
72669 if (delegated_inode) {
72670 error = break_deleg_wait(&delegated_inode);
72671@@ -4348,6 +4530,20 @@ retry_deleg:
72672 if (new_dentry == trap)
72673 goto exit5;
72674
72675+ if (gr_bad_chroot_rename(old_dentry, oldnd.path.mnt, new_dentry, newnd.path.mnt)) {
72676+ /* use EXDEV error to cause 'mv' to switch to an alternative
72677+ * method for usability
72678+ */
72679+ error = -EXDEV;
72680+ goto exit5;
72681+ }
72682+
72683+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
72684+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
72685+ to, flags);
72686+ if (error)
72687+ goto exit5;
72688+
72689 error = security_path_rename(&oldnd.path, old_dentry,
72690 &newnd.path, new_dentry, flags);
72691 if (error)
72692@@ -4355,6 +4551,9 @@ retry_deleg:
72693 error = vfs_rename(old_dir->d_inode, old_dentry,
72694 new_dir->d_inode, new_dentry,
72695 &delegated_inode, flags);
72696+ if (!error)
72697+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
72698+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
72699 exit5:
72700 dput(new_dentry);
72701 exit4:
72702@@ -4411,14 +4610,24 @@ EXPORT_SYMBOL(vfs_whiteout);
72703
72704 int readlink_copy(char __user *buffer, int buflen, const char *link)
72705 {
72706+ char tmpbuf[64];
72707+ const char *newlink;
72708 int len = PTR_ERR(link);
72709+
72710 if (IS_ERR(link))
72711 goto out;
72712
72713 len = strlen(link);
72714 if (len > (unsigned) buflen)
72715 len = buflen;
72716- if (copy_to_user(buffer, link, len))
72717+
72718+ if (len < sizeof(tmpbuf)) {
72719+ memcpy(tmpbuf, link, len);
72720+ newlink = tmpbuf;
72721+ } else
72722+ newlink = link;
72723+
72724+ if (copy_to_user(buffer, newlink, len))
72725 len = -EFAULT;
72726 out:
72727 return len;
72728diff --git a/fs/namespace.c b/fs/namespace.c
72729index f07c769..9246b81 100644
72730--- a/fs/namespace.c
72731+++ b/fs/namespace.c
72732@@ -1480,6 +1480,9 @@ static int do_umount(struct mount *mnt, int flags)
72733 if (!(sb->s_flags & MS_RDONLY))
72734 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
72735 up_write(&sb->s_umount);
72736+
72737+ gr_log_remount(mnt->mnt_devname, retval);
72738+
72739 return retval;
72740 }
72741
72742@@ -1502,6 +1505,9 @@ static int do_umount(struct mount *mnt, int flags)
72743 }
72744 unlock_mount_hash();
72745 namespace_unlock();
72746+
72747+ gr_log_unmount(mnt->mnt_devname, retval);
72748+
72749 return retval;
72750 }
72751
72752@@ -1559,7 +1565,7 @@ static inline bool may_mount(void)
72753 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
72754 */
72755
72756-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
72757+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
72758 {
72759 struct path path;
72760 struct mount *mnt;
72761@@ -1604,7 +1610,7 @@ out:
72762 /*
72763 * The 2.0 compatible umount. No flags.
72764 */
72765-SYSCALL_DEFINE1(oldumount, char __user *, name)
72766+SYSCALL_DEFINE1(oldumount, const char __user *, name)
72767 {
72768 return sys_umount(name, 0);
72769 }
72770@@ -2673,6 +2679,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
72771 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
72772 MS_STRICTATIME);
72773
72774+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
72775+ retval = -EPERM;
72776+ goto dput_out;
72777+ }
72778+
72779+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
72780+ retval = -EPERM;
72781+ goto dput_out;
72782+ }
72783+
72784 if (flags & MS_REMOUNT)
72785 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
72786 data_page);
72787@@ -2686,7 +2702,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
72788 retval = do_new_mount(&path, type_page, flags, mnt_flags,
72789 dev_name, data_page);
72790 dput_out:
72791+ gr_log_mount(dev_name, &path, retval);
72792+
72793 path_put(&path);
72794+
72795 return retval;
72796 }
72797
72798@@ -2704,7 +2723,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
72799 * number incrementing at 10Ghz will take 12,427 years to wrap which
72800 * is effectively never, so we can ignore the possibility.
72801 */
72802-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
72803+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
72804
72805 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
72806 {
72807@@ -2720,7 +2739,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
72808 return ERR_PTR(ret);
72809 }
72810 new_ns->ns.ops = &mntns_operations;
72811- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
72812+ new_ns->seq = atomic64_add_return_unchecked(1, &mnt_ns_seq);
72813 atomic_set(&new_ns->count, 1);
72814 new_ns->root = NULL;
72815 INIT_LIST_HEAD(&new_ns->list);
72816@@ -2730,7 +2749,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
72817 return new_ns;
72818 }
72819
72820-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
72821+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
72822 struct user_namespace *user_ns, struct fs_struct *new_fs)
72823 {
72824 struct mnt_namespace *new_ns;
72825@@ -2851,8 +2870,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
72826 }
72827 EXPORT_SYMBOL(mount_subtree);
72828
72829-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
72830- char __user *, type, unsigned long, flags, void __user *, data)
72831+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
72832+ const char __user *, type, unsigned long, flags, void __user *, data)
72833 {
72834 int ret;
72835 char *kernel_type;
72836@@ -2958,6 +2977,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
72837 if (error)
72838 goto out2;
72839
72840+ if (gr_handle_chroot_pivot()) {
72841+ error = -EPERM;
72842+ goto out2;
72843+ }
72844+
72845 get_fs_root(current->fs, &root);
72846 old_mp = lock_mount(&old);
72847 error = PTR_ERR(old_mp);
72848@@ -3242,7 +3266,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
72849 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
72850 return -EPERM;
72851
72852- if (fs->users != 1)
72853+ if (atomic_read(&fs->users) != 1)
72854 return -EINVAL;
72855
72856 get_mnt_ns(mnt_ns);
72857diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
72858index 19ca95c..b28702c 100644
72859--- a/fs/nfs/callback_xdr.c
72860+++ b/fs/nfs/callback_xdr.c
72861@@ -51,7 +51,7 @@ struct callback_op {
72862 callback_decode_arg_t decode_args;
72863 callback_encode_res_t encode_res;
72864 long res_maxsize;
72865-};
72866+} __do_const;
72867
72868 static struct callback_op callback_ops[];
72869
72870diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
72871index d42dff6..ecbdf42 100644
72872--- a/fs/nfs/inode.c
72873+++ b/fs/nfs/inode.c
72874@@ -1270,16 +1270,16 @@ static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fat
72875 return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0;
72876 }
72877
72878-static atomic_long_t nfs_attr_generation_counter;
72879+static atomic_long_unchecked_t nfs_attr_generation_counter;
72880
72881 static unsigned long nfs_read_attr_generation_counter(void)
72882 {
72883- return atomic_long_read(&nfs_attr_generation_counter);
72884+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
72885 }
72886
72887 unsigned long nfs_inc_attr_generation_counter(void)
72888 {
72889- return atomic_long_inc_return(&nfs_attr_generation_counter);
72890+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
72891 }
72892 EXPORT_SYMBOL_GPL(nfs_inc_attr_generation_counter);
72893
72894diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
72895index 5416968..0942042 100644
72896--- a/fs/nfsd/nfs4proc.c
72897+++ b/fs/nfsd/nfs4proc.c
72898@@ -1496,7 +1496,7 @@ struct nfsd4_operation {
72899 nfsd4op_rsize op_rsize_bop;
72900 stateid_getter op_get_currentstateid;
72901 stateid_setter op_set_currentstateid;
72902-};
72903+} __do_const;
72904
72905 static struct nfsd4_operation nfsd4_ops[];
72906
72907diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
72908index 5b33ce1..c2a92aa 100644
72909--- a/fs/nfsd/nfs4xdr.c
72910+++ b/fs/nfsd/nfs4xdr.c
72911@@ -1703,7 +1703,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
72912
72913 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
72914
72915-static nfsd4_dec nfsd4_dec_ops[] = {
72916+static const nfsd4_dec nfsd4_dec_ops[] = {
72917 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
72918 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
72919 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
72920diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
72921index 46ec934..f384e41 100644
72922--- a/fs/nfsd/nfscache.c
72923+++ b/fs/nfsd/nfscache.c
72924@@ -541,7 +541,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
72925 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
72926 u32 hash;
72927 struct nfsd_drc_bucket *b;
72928- int len;
72929+ long len;
72930 size_t bufsize = 0;
72931
72932 if (!rp)
72933@@ -550,11 +550,14 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
72934 hash = nfsd_cache_hash(rp->c_xid);
72935 b = &drc_hashtbl[hash];
72936
72937- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
72938- len >>= 2;
72939+ if (statp) {
72940+ len = (char*)statp - (char*)resv->iov_base;
72941+ len = resv->iov_len - len;
72942+ len >>= 2;
72943+ }
72944
72945 /* Don't cache excessive amounts of data and XDR failures */
72946- if (!statp || len > (256 >> 2)) {
72947+ if (!statp || len > (256 >> 2) || len < 0) {
72948 nfsd_reply_cache_free(b, rp);
72949 return;
72950 }
72951@@ -562,7 +565,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
72952 switch (cachetype) {
72953 case RC_REPLSTAT:
72954 if (len != 1)
72955- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
72956+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
72957 rp->c_replstat = *statp;
72958 break;
72959 case RC_REPLBUFF:
72960diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
72961index 3685265..e77261e 100644
72962--- a/fs/nfsd/vfs.c
72963+++ b/fs/nfsd/vfs.c
72964@@ -893,7 +893,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
72965
72966 oldfs = get_fs();
72967 set_fs(KERNEL_DS);
72968- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
72969+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
72970 set_fs(oldfs);
72971 return nfsd_finish_read(file, count, host_err);
72972 }
72973@@ -980,7 +980,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
72974
72975 /* Write the data. */
72976 oldfs = get_fs(); set_fs(KERNEL_DS);
72977- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
72978+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
72979 set_fs(oldfs);
72980 if (host_err < 0)
72981 goto out_nfserr;
72982@@ -1525,7 +1525,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
72983 */
72984
72985 oldfs = get_fs(); set_fs(KERNEL_DS);
72986- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
72987+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
72988 set_fs(oldfs);
72989
72990 if (host_err < 0)
72991diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
72992index 52ccd34..7a6b202 100644
72993--- a/fs/nls/nls_base.c
72994+++ b/fs/nls/nls_base.c
72995@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
72996
72997 int __register_nls(struct nls_table *nls, struct module *owner)
72998 {
72999- struct nls_table ** tmp = &tables;
73000+ struct nls_table *tmp = tables;
73001
73002 if (nls->next)
73003 return -EBUSY;
73004
73005- nls->owner = owner;
73006+ pax_open_kernel();
73007+ *(void **)&nls->owner = owner;
73008+ pax_close_kernel();
73009 spin_lock(&nls_lock);
73010- while (*tmp) {
73011- if (nls == *tmp) {
73012+ while (tmp) {
73013+ if (nls == tmp) {
73014 spin_unlock(&nls_lock);
73015 return -EBUSY;
73016 }
73017- tmp = &(*tmp)->next;
73018+ tmp = tmp->next;
73019 }
73020- nls->next = tables;
73021+ pax_open_kernel();
73022+ *(struct nls_table **)&nls->next = tables;
73023+ pax_close_kernel();
73024 tables = nls;
73025 spin_unlock(&nls_lock);
73026 return 0;
73027@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
73028
73029 int unregister_nls(struct nls_table * nls)
73030 {
73031- struct nls_table ** tmp = &tables;
73032+ struct nls_table * const * tmp = &tables;
73033
73034 spin_lock(&nls_lock);
73035 while (*tmp) {
73036 if (nls == *tmp) {
73037- *tmp = nls->next;
73038+ pax_open_kernel();
73039+ *(struct nls_table **)tmp = nls->next;
73040+ pax_close_kernel();
73041 spin_unlock(&nls_lock);
73042 return 0;
73043 }
73044@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
73045 return -EINVAL;
73046 }
73047
73048-static struct nls_table *find_nls(char *charset)
73049+static struct nls_table *find_nls(const char *charset)
73050 {
73051 struct nls_table *nls;
73052 spin_lock(&nls_lock);
73053@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
73054 return nls;
73055 }
73056
73057-struct nls_table *load_nls(char *charset)
73058+struct nls_table *load_nls(const char *charset)
73059 {
73060 return try_then_request_module(find_nls(charset), "nls_%s", charset);
73061 }
73062diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
73063index 162b3f1..6076a7c 100644
73064--- a/fs/nls/nls_euc-jp.c
73065+++ b/fs/nls/nls_euc-jp.c
73066@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
73067 p_nls = load_nls("cp932");
73068
73069 if (p_nls) {
73070- table.charset2upper = p_nls->charset2upper;
73071- table.charset2lower = p_nls->charset2lower;
73072+ pax_open_kernel();
73073+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
73074+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
73075+ pax_close_kernel();
73076 return register_nls(&table);
73077 }
73078
73079diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
73080index a80a741..7b96e1b 100644
73081--- a/fs/nls/nls_koi8-ru.c
73082+++ b/fs/nls/nls_koi8-ru.c
73083@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
73084 p_nls = load_nls("koi8-u");
73085
73086 if (p_nls) {
73087- table.charset2upper = p_nls->charset2upper;
73088- table.charset2lower = p_nls->charset2lower;
73089+ pax_open_kernel();
73090+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
73091+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
73092+ pax_close_kernel();
73093 return register_nls(&table);
73094 }
73095
73096diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
73097index cf27550..6c70f29d 100644
73098--- a/fs/notify/fanotify/fanotify_user.c
73099+++ b/fs/notify/fanotify/fanotify_user.c
73100@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
73101
73102 fd = fanotify_event_metadata.fd;
73103 ret = -EFAULT;
73104- if (copy_to_user(buf, &fanotify_event_metadata,
73105- fanotify_event_metadata.event_len))
73106+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
73107+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
73108 goto out_close_fd;
73109
73110 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
73111diff --git a/fs/notify/notification.c b/fs/notify/notification.c
73112index a95d8e0..a91a5fd 100644
73113--- a/fs/notify/notification.c
73114+++ b/fs/notify/notification.c
73115@@ -48,7 +48,7 @@
73116 #include <linux/fsnotify_backend.h>
73117 #include "fsnotify.h"
73118
73119-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
73120+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
73121
73122 /**
73123 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
73124@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
73125 */
73126 u32 fsnotify_get_cookie(void)
73127 {
73128- return atomic_inc_return(&fsnotify_sync_cookie);
73129+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
73130 }
73131 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
73132
73133diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
73134index 9e38daf..5727cae 100644
73135--- a/fs/ntfs/dir.c
73136+++ b/fs/ntfs/dir.c
73137@@ -1310,7 +1310,7 @@ find_next_index_buffer:
73138 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
73139 ~(s64)(ndir->itype.index.block_size - 1)));
73140 /* Bounds checks. */
73141- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
73142+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
73143 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
73144 "inode 0x%lx or driver bug.", vdir->i_ino);
73145 goto err_out;
73146diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
73147index 1da9b2d..9cca092a 100644
73148--- a/fs/ntfs/file.c
73149+++ b/fs/ntfs/file.c
73150@@ -1281,7 +1281,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
73151 char *addr;
73152 size_t total = 0;
73153 unsigned len;
73154- int left;
73155+ unsigned left;
73156
73157 do {
73158 len = PAGE_CACHE_SIZE - ofs;
73159diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
73160index 9e1e112..241a52a 100644
73161--- a/fs/ntfs/super.c
73162+++ b/fs/ntfs/super.c
73163@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
73164 if (!silent)
73165 ntfs_error(sb, "Primary boot sector is invalid.");
73166 } else if (!silent)
73167- ntfs_error(sb, read_err_str, "primary");
73168+ ntfs_error(sb, read_err_str, "%s", "primary");
73169 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
73170 if (bh_primary)
73171 brelse(bh_primary);
73172@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
73173 goto hotfix_primary_boot_sector;
73174 brelse(bh_backup);
73175 } else if (!silent)
73176- ntfs_error(sb, read_err_str, "backup");
73177+ ntfs_error(sb, read_err_str, "%s", "backup");
73178 /* Try to read NT3.51- backup boot sector. */
73179 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
73180 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
73181@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
73182 "sector.");
73183 brelse(bh_backup);
73184 } else if (!silent)
73185- ntfs_error(sb, read_err_str, "backup");
73186+ ntfs_error(sb, read_err_str, "%s", "backup");
73187 /* We failed. Cleanup and return. */
73188 if (bh_primary)
73189 brelse(bh_primary);
73190diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
73191index 0440134..d52c93a 100644
73192--- a/fs/ocfs2/localalloc.c
73193+++ b/fs/ocfs2/localalloc.c
73194@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
73195 goto bail;
73196 }
73197
73198- atomic_inc(&osb->alloc_stats.moves);
73199+ atomic_inc_unchecked(&osb->alloc_stats.moves);
73200
73201 bail:
73202 if (handle)
73203diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
73204index 460c6c3..b4ef513 100644
73205--- a/fs/ocfs2/ocfs2.h
73206+++ b/fs/ocfs2/ocfs2.h
73207@@ -247,11 +247,11 @@ enum ocfs2_vol_state
73208
73209 struct ocfs2_alloc_stats
73210 {
73211- atomic_t moves;
73212- atomic_t local_data;
73213- atomic_t bitmap_data;
73214- atomic_t bg_allocs;
73215- atomic_t bg_extends;
73216+ atomic_unchecked_t moves;
73217+ atomic_unchecked_t local_data;
73218+ atomic_unchecked_t bitmap_data;
73219+ atomic_unchecked_t bg_allocs;
73220+ atomic_unchecked_t bg_extends;
73221 };
73222
73223 enum ocfs2_local_alloc_state
73224diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
73225index ee541f9..df3a500 100644
73226--- a/fs/ocfs2/refcounttree.c
73227+++ b/fs/ocfs2/refcounttree.c
73228@@ -4276,7 +4276,7 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
73229 error = posix_acl_create(dir, &mode, &default_acl, &acl);
73230 if (error) {
73231 mlog_errno(error);
73232- goto out;
73233+ return error;
73234 }
73235
73236 error = ocfs2_create_inode_in_orphan(dir, mode,
73237diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
73238index 0cb889a..6a26b24 100644
73239--- a/fs/ocfs2/suballoc.c
73240+++ b/fs/ocfs2/suballoc.c
73241@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
73242 mlog_errno(status);
73243 goto bail;
73244 }
73245- atomic_inc(&osb->alloc_stats.bg_extends);
73246+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
73247
73248 /* You should never ask for this much metadata */
73249 BUG_ON(bits_wanted >
73250@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
73251 mlog_errno(status);
73252 goto bail;
73253 }
73254- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
73255+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
73256
73257 *suballoc_loc = res.sr_bg_blkno;
73258 *suballoc_bit_start = res.sr_bit_offset;
73259@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
73260 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
73261 res->sr_bits);
73262
73263- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
73264+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
73265
73266 BUG_ON(res->sr_bits != 1);
73267
73268@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
73269 mlog_errno(status);
73270 goto bail;
73271 }
73272- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
73273+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
73274
73275 BUG_ON(res.sr_bits != 1);
73276
73277@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
73278 cluster_start,
73279 num_clusters);
73280 if (!status)
73281- atomic_inc(&osb->alloc_stats.local_data);
73282+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
73283 } else {
73284 if (min_clusters > (osb->bitmap_cpg - 1)) {
73285 /* The only paths asking for contiguousness
73286@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
73287 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
73288 res.sr_bg_blkno,
73289 res.sr_bit_offset);
73290- atomic_inc(&osb->alloc_stats.bitmap_data);
73291+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
73292 *num_clusters = res.sr_bits;
73293 }
73294 }
73295diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
73296index 2667518..24bcf79 100644
73297--- a/fs/ocfs2/super.c
73298+++ b/fs/ocfs2/super.c
73299@@ -308,11 +308,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
73300 "%10s => GlobalAllocs: %d LocalAllocs: %d "
73301 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
73302 "Stats",
73303- atomic_read(&osb->alloc_stats.bitmap_data),
73304- atomic_read(&osb->alloc_stats.local_data),
73305- atomic_read(&osb->alloc_stats.bg_allocs),
73306- atomic_read(&osb->alloc_stats.moves),
73307- atomic_read(&osb->alloc_stats.bg_extends));
73308+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
73309+ atomic_read_unchecked(&osb->alloc_stats.local_data),
73310+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
73311+ atomic_read_unchecked(&osb->alloc_stats.moves),
73312+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
73313
73314 out += snprintf(buf + out, len - out,
73315 "%10s => State: %u Descriptor: %llu Size: %u bits "
73316@@ -2093,11 +2093,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
73317
73318 mutex_init(&osb->system_file_mutex);
73319
73320- atomic_set(&osb->alloc_stats.moves, 0);
73321- atomic_set(&osb->alloc_stats.local_data, 0);
73322- atomic_set(&osb->alloc_stats.bitmap_data, 0);
73323- atomic_set(&osb->alloc_stats.bg_allocs, 0);
73324- atomic_set(&osb->alloc_stats.bg_extends, 0);
73325+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
73326+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
73327+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
73328+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
73329+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
73330
73331 /* Copy the blockcheck stats from the superblock probe */
73332 osb->osb_ecc_stats = *stats;
73333diff --git a/fs/open.c b/fs/open.c
73334index 44a3be1..5e97aa1 100644
73335--- a/fs/open.c
73336+++ b/fs/open.c
73337@@ -32,6 +32,8 @@
73338 #include <linux/dnotify.h>
73339 #include <linux/compat.h>
73340
73341+#define CREATE_TRACE_POINTS
73342+#include <trace/events/fs.h>
73343 #include "internal.h"
73344
73345 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
73346@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
73347 error = locks_verify_truncate(inode, NULL, length);
73348 if (!error)
73349 error = security_path_truncate(path);
73350+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
73351+ error = -EACCES;
73352 if (!error)
73353 error = do_truncate(path->dentry, length, 0, NULL);
73354
73355@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
73356 error = locks_verify_truncate(inode, f.file, length);
73357 if (!error)
73358 error = security_path_truncate(&f.file->f_path);
73359+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
73360+ error = -EACCES;
73361 if (!error)
73362 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
73363 sb_end_write(inode->i_sb);
73364@@ -392,6 +398,9 @@ retry:
73365 if (__mnt_is_readonly(path.mnt))
73366 res = -EROFS;
73367
73368+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
73369+ res = -EACCES;
73370+
73371 out_path_release:
73372 path_put(&path);
73373 if (retry_estale(res, lookup_flags)) {
73374@@ -423,6 +432,8 @@ retry:
73375 if (error)
73376 goto dput_and_out;
73377
73378+ gr_log_chdir(path.dentry, path.mnt);
73379+
73380 set_fs_pwd(current->fs, &path);
73381
73382 dput_and_out:
73383@@ -452,6 +463,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
73384 goto out_putf;
73385
73386 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
73387+
73388+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
73389+ error = -EPERM;
73390+
73391+ if (!error)
73392+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
73393+
73394 if (!error)
73395 set_fs_pwd(current->fs, &f.file->f_path);
73396 out_putf:
73397@@ -481,7 +499,13 @@ retry:
73398 if (error)
73399 goto dput_and_out;
73400
73401+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
73402+ goto dput_and_out;
73403+
73404 set_fs_root(current->fs, &path);
73405+
73406+ gr_handle_chroot_chdir(&path);
73407+
73408 error = 0;
73409 dput_and_out:
73410 path_put(&path);
73411@@ -505,6 +529,16 @@ static int chmod_common(struct path *path, umode_t mode)
73412 return error;
73413 retry_deleg:
73414 mutex_lock(&inode->i_mutex);
73415+
73416+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
73417+ error = -EACCES;
73418+ goto out_unlock;
73419+ }
73420+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
73421+ error = -EACCES;
73422+ goto out_unlock;
73423+ }
73424+
73425 error = security_path_chmod(path, mode);
73426 if (error)
73427 goto out_unlock;
73428@@ -570,6 +604,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
73429 uid = make_kuid(current_user_ns(), user);
73430 gid = make_kgid(current_user_ns(), group);
73431
73432+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
73433+ return -EACCES;
73434+
73435 retry_deleg:
73436 newattrs.ia_valid = ATTR_CTIME;
73437 if (user != (uid_t) -1) {
73438@@ -1017,6 +1054,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
73439 } else {
73440 fsnotify_open(f);
73441 fd_install(fd, f);
73442+ trace_do_sys_open(tmp->name, flags, mode);
73443 }
73444 }
73445 putname(tmp);
73446diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
73447index bf8537c..c16ef7d 100644
73448--- a/fs/overlayfs/super.c
73449+++ b/fs/overlayfs/super.c
73450@@ -172,7 +172,7 @@ void ovl_path_lower(struct dentry *dentry, struct path *path)
73451 {
73452 struct ovl_entry *oe = dentry->d_fsdata;
73453
73454- *path = oe->numlower ? oe->lowerstack[0] : (struct path) { NULL, NULL };
73455+ *path = oe->numlower ? oe->lowerstack[0] : (struct path) { .dentry = NULL, .mnt = NULL };
73456 }
73457
73458 int ovl_want_write(struct dentry *dentry)
73459@@ -816,8 +816,8 @@ static unsigned int ovl_split_lowerdirs(char *str)
73460
73461 static int ovl_fill_super(struct super_block *sb, void *data, int silent)
73462 {
73463- struct path upperpath = { NULL, NULL };
73464- struct path workpath = { NULL, NULL };
73465+ struct path upperpath = { .dentry = NULL, .mnt = NULL };
73466+ struct path workpath = { .dentry = NULL, .mnt = NULL };
73467 struct dentry *root_dentry;
73468 struct ovl_entry *oe;
73469 struct ovl_fs *ufs;
73470diff --git a/fs/pipe.c b/fs/pipe.c
73471index 21981e5..2c0bffb 100644
73472--- a/fs/pipe.c
73473+++ b/fs/pipe.c
73474@@ -37,7 +37,7 @@ unsigned int pipe_max_size = 1048576;
73475 /*
73476 * Minimum pipe size, as required by POSIX
73477 */
73478-unsigned int pipe_min_size = PAGE_SIZE;
73479+unsigned int pipe_min_size __read_only = PAGE_SIZE;
73480
73481 /*
73482 * We use a start+len construction, which provides full use of the
73483@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
73484
73485 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
73486 {
73487- if (pipe->files)
73488+ if (atomic_read(&pipe->files))
73489 mutex_lock_nested(&pipe->mutex, subclass);
73490 }
73491
73492@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
73493
73494 void pipe_unlock(struct pipe_inode_info *pipe)
73495 {
73496- if (pipe->files)
73497+ if (atomic_read(&pipe->files))
73498 mutex_unlock(&pipe->mutex);
73499 }
73500 EXPORT_SYMBOL(pipe_unlock);
73501@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
73502 }
73503 if (bufs) /* More to do? */
73504 continue;
73505- if (!pipe->writers)
73506+ if (!atomic_read(&pipe->writers))
73507 break;
73508- if (!pipe->waiting_writers) {
73509+ if (!atomic_read(&pipe->waiting_writers)) {
73510 /* syscall merging: Usually we must not sleep
73511 * if O_NONBLOCK is set, or if we got some data.
73512 * But if a writer sleeps in kernel space, then
73513@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
73514
73515 __pipe_lock(pipe);
73516
73517- if (!pipe->readers) {
73518+ if (!atomic_read(&pipe->readers)) {
73519 send_sig(SIGPIPE, current, 0);
73520 ret = -EPIPE;
73521 goto out;
73522@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
73523 for (;;) {
73524 int bufs;
73525
73526- if (!pipe->readers) {
73527+ if (!atomic_read(&pipe->readers)) {
73528 send_sig(SIGPIPE, current, 0);
73529 if (!ret)
73530 ret = -EPIPE;
73531@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
73532 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
73533 do_wakeup = 0;
73534 }
73535- pipe->waiting_writers++;
73536+ atomic_inc(&pipe->waiting_writers);
73537 pipe_wait(pipe);
73538- pipe->waiting_writers--;
73539+ atomic_dec(&pipe->waiting_writers);
73540 }
73541 out:
73542 __pipe_unlock(pipe);
73543@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
73544 mask = 0;
73545 if (filp->f_mode & FMODE_READ) {
73546 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
73547- if (!pipe->writers && filp->f_version != pipe->w_counter)
73548+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
73549 mask |= POLLHUP;
73550 }
73551
73552@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
73553 * Most Unices do not set POLLERR for FIFOs but on Linux they
73554 * behave exactly like pipes for poll().
73555 */
73556- if (!pipe->readers)
73557+ if (!atomic_read(&pipe->readers))
73558 mask |= POLLERR;
73559 }
73560
73561@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
73562 int kill = 0;
73563
73564 spin_lock(&inode->i_lock);
73565- if (!--pipe->files) {
73566+ if (atomic_dec_and_test(&pipe->files)) {
73567 inode->i_pipe = NULL;
73568 kill = 1;
73569 }
73570@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
73571
73572 __pipe_lock(pipe);
73573 if (file->f_mode & FMODE_READ)
73574- pipe->readers--;
73575+ atomic_dec(&pipe->readers);
73576 if (file->f_mode & FMODE_WRITE)
73577- pipe->writers--;
73578+ atomic_dec(&pipe->writers);
73579
73580- if (pipe->readers || pipe->writers) {
73581+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
73582 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
73583 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
73584 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
73585@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
73586 kfree(pipe);
73587 }
73588
73589-static struct vfsmount *pipe_mnt __read_mostly;
73590+struct vfsmount *pipe_mnt __read_mostly;
73591
73592 /*
73593 * pipefs_dname() is called from d_path().
73594@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
73595 goto fail_iput;
73596
73597 inode->i_pipe = pipe;
73598- pipe->files = 2;
73599- pipe->readers = pipe->writers = 1;
73600+ atomic_set(&pipe->files, 2);
73601+ atomic_set(&pipe->readers, 1);
73602+ atomic_set(&pipe->writers, 1);
73603 inode->i_fop = &pipefifo_fops;
73604
73605 /*
73606@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
73607 spin_lock(&inode->i_lock);
73608 if (inode->i_pipe) {
73609 pipe = inode->i_pipe;
73610- pipe->files++;
73611+ atomic_inc(&pipe->files);
73612 spin_unlock(&inode->i_lock);
73613 } else {
73614 spin_unlock(&inode->i_lock);
73615 pipe = alloc_pipe_info();
73616 if (!pipe)
73617 return -ENOMEM;
73618- pipe->files = 1;
73619+ atomic_set(&pipe->files, 1);
73620 spin_lock(&inode->i_lock);
73621 if (unlikely(inode->i_pipe)) {
73622- inode->i_pipe->files++;
73623+ atomic_inc(&inode->i_pipe->files);
73624 spin_unlock(&inode->i_lock);
73625 free_pipe_info(pipe);
73626 pipe = inode->i_pipe;
73627@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
73628 * opened, even when there is no process writing the FIFO.
73629 */
73630 pipe->r_counter++;
73631- if (pipe->readers++ == 0)
73632+ if (atomic_inc_return(&pipe->readers) == 1)
73633 wake_up_partner(pipe);
73634
73635- if (!is_pipe && !pipe->writers) {
73636+ if (!is_pipe && !atomic_read(&pipe->writers)) {
73637 if ((filp->f_flags & O_NONBLOCK)) {
73638 /* suppress POLLHUP until we have
73639 * seen a writer */
73640@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
73641 * errno=ENXIO when there is no process reading the FIFO.
73642 */
73643 ret = -ENXIO;
73644- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
73645+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
73646 goto err;
73647
73648 pipe->w_counter++;
73649- if (!pipe->writers++)
73650+ if (atomic_inc_return(&pipe->writers) == 1)
73651 wake_up_partner(pipe);
73652
73653- if (!is_pipe && !pipe->readers) {
73654+ if (!is_pipe && !atomic_read(&pipe->readers)) {
73655 if (wait_for_partner(pipe, &pipe->r_counter))
73656 goto err_wr;
73657 }
73658@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
73659 * the process can at least talk to itself.
73660 */
73661
73662- pipe->readers++;
73663- pipe->writers++;
73664+ atomic_inc(&pipe->readers);
73665+ atomic_inc(&pipe->writers);
73666 pipe->r_counter++;
73667 pipe->w_counter++;
73668- if (pipe->readers == 1 || pipe->writers == 1)
73669+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
73670 wake_up_partner(pipe);
73671 break;
73672
73673@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
73674 return 0;
73675
73676 err_rd:
73677- if (!--pipe->readers)
73678+ if (atomic_dec_and_test(&pipe->readers))
73679 wake_up_interruptible(&pipe->wait);
73680 ret = -ERESTARTSYS;
73681 goto err;
73682
73683 err_wr:
73684- if (!--pipe->writers)
73685+ if (atomic_dec_and_test(&pipe->writers))
73686 wake_up_interruptible(&pipe->wait);
73687 ret = -ERESTARTSYS;
73688 goto err;
73689@@ -1010,7 +1011,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
73690 * Currently we rely on the pipe array holding a power-of-2 number
73691 * of pages.
73692 */
73693-static inline unsigned int round_pipe_size(unsigned int size)
73694+static inline unsigned long round_pipe_size(unsigned long size)
73695 {
73696 unsigned long nr_pages;
73697
73698@@ -1058,13 +1059,16 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
73699
73700 switch (cmd) {
73701 case F_SETPIPE_SZ: {
73702- unsigned int size, nr_pages;
73703+ unsigned long size, nr_pages;
73704+
73705+ ret = -EINVAL;
73706+ if (arg < pipe_min_size)
73707+ goto out;
73708
73709 size = round_pipe_size(arg);
73710 nr_pages = size >> PAGE_SHIFT;
73711
73712- ret = -EINVAL;
73713- if (!nr_pages)
73714+ if (size < pipe_min_size)
73715 goto out;
73716
73717 if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
73718diff --git a/fs/posix_acl.c b/fs/posix_acl.c
73719index 3a48bb7..403067b 100644
73720--- a/fs/posix_acl.c
73721+++ b/fs/posix_acl.c
73722@@ -20,6 +20,7 @@
73723 #include <linux/xattr.h>
73724 #include <linux/export.h>
73725 #include <linux/user_namespace.h>
73726+#include <linux/grsecurity.h>
73727
73728 struct posix_acl **acl_by_type(struct inode *inode, int type)
73729 {
73730@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
73731 }
73732 }
73733 if (mode_p)
73734- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
73735+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
73736 return not_equiv;
73737 }
73738 EXPORT_SYMBOL(posix_acl_equiv_mode);
73739@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
73740 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
73741 }
73742
73743- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
73744+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
73745 return not_equiv;
73746 }
73747
73748@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
73749 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
73750 int err = -ENOMEM;
73751 if (clone) {
73752+ *mode_p &= ~gr_acl_umask();
73753+
73754 err = posix_acl_create_masq(clone, mode_p);
73755 if (err < 0) {
73756 posix_acl_release(clone);
73757@@ -663,11 +666,12 @@ struct posix_acl *
73758 posix_acl_from_xattr(struct user_namespace *user_ns,
73759 const void *value, size_t size)
73760 {
73761- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
73762- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
73763+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
73764+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
73765 int count;
73766 struct posix_acl *acl;
73767 struct posix_acl_entry *acl_e;
73768+ umode_t umask = gr_acl_umask();
73769
73770 if (!value)
73771 return NULL;
73772@@ -693,12 +697,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
73773
73774 switch(acl_e->e_tag) {
73775 case ACL_USER_OBJ:
73776+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
73777+ break;
73778 case ACL_GROUP_OBJ:
73779 case ACL_MASK:
73780+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
73781+ break;
73782 case ACL_OTHER:
73783+ acl_e->e_perm &= ~(umask & S_IRWXO);
73784 break;
73785
73786 case ACL_USER:
73787+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
73788 acl_e->e_uid =
73789 make_kuid(user_ns,
73790 le32_to_cpu(entry->e_id));
73791@@ -706,6 +716,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
73792 goto fail;
73793 break;
73794 case ACL_GROUP:
73795+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
73796 acl_e->e_gid =
73797 make_kgid(user_ns,
73798 le32_to_cpu(entry->e_id));
73799diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
73800index 2183fcf..3c32a98 100644
73801--- a/fs/proc/Kconfig
73802+++ b/fs/proc/Kconfig
73803@@ -30,7 +30,7 @@ config PROC_FS
73804
73805 config PROC_KCORE
73806 bool "/proc/kcore support" if !ARM
73807- depends on PROC_FS && MMU
73808+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
73809 help
73810 Provides a virtual ELF core file of the live kernel. This can
73811 be read with gdb and other ELF tools. No modifications can be
73812@@ -38,8 +38,8 @@ config PROC_KCORE
73813
73814 config PROC_VMCORE
73815 bool "/proc/vmcore support"
73816- depends on PROC_FS && CRASH_DUMP
73817- default y
73818+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
73819+ default n
73820 help
73821 Exports the dump image of crashed kernel in ELF format.
73822
73823@@ -63,8 +63,8 @@ config PROC_SYSCTL
73824 limited in memory.
73825
73826 config PROC_PAGE_MONITOR
73827- default y
73828- depends on PROC_FS && MMU
73829+ default n
73830+ depends on PROC_FS && MMU && !GRKERNSEC
73831 bool "Enable /proc page monitoring" if EXPERT
73832 help
73833 Various /proc files exist to monitor process memory utilization:
73834diff --git a/fs/proc/array.c b/fs/proc/array.c
73835index 1295a00..4c91a6b 100644
73836--- a/fs/proc/array.c
73837+++ b/fs/proc/array.c
73838@@ -60,6 +60,7 @@
73839 #include <linux/tty.h>
73840 #include <linux/string.h>
73841 #include <linux/mman.h>
73842+#include <linux/grsecurity.h>
73843 #include <linux/proc_fs.h>
73844 #include <linux/ioport.h>
73845 #include <linux/uaccess.h>
73846@@ -322,6 +323,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
73847 cpumask_pr_args(&task->cpus_allowed));
73848 }
73849
73850+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
73851+static inline void task_pax(struct seq_file *m, struct task_struct *p)
73852+{
73853+ if (p->mm)
73854+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
73855+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
73856+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
73857+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
73858+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
73859+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
73860+ else
73861+ seq_printf(m, "PaX:\t-----\n");
73862+}
73863+#endif
73864+
73865 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
73866 struct pid *pid, struct task_struct *task)
73867 {
73868@@ -340,9 +356,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
73869 task_cpus_allowed(m, task);
73870 cpuset_task_status_allowed(m, task);
73871 task_context_switch_counts(m, task);
73872+
73873+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
73874+ task_pax(m, task);
73875+#endif
73876+
73877+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
73878+ task_grsec_rbac(m, task);
73879+#endif
73880+
73881 return 0;
73882 }
73883
73884+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73885+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
73886+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
73887+ _mm->pax_flags & MF_PAX_SEGMEXEC))
73888+#endif
73889+
73890 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
73891 struct pid *pid, struct task_struct *task, int whole)
73892 {
73893@@ -364,6 +395,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
73894 char tcomm[sizeof(task->comm)];
73895 unsigned long flags;
73896
73897+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73898+ if (current->exec_id != m->exec_id) {
73899+ gr_log_badprocpid("stat");
73900+ return 0;
73901+ }
73902+#endif
73903+
73904 state = *get_task_state(task);
73905 vsize = eip = esp = 0;
73906 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
73907@@ -434,6 +472,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
73908 gtime = task_gtime(task);
73909 }
73910
73911+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73912+ if (PAX_RAND_FLAGS(mm)) {
73913+ eip = 0;
73914+ esp = 0;
73915+ wchan = 0;
73916+ }
73917+#endif
73918+#ifdef CONFIG_GRKERNSEC_HIDESYM
73919+ wchan = 0;
73920+ eip =0;
73921+ esp =0;
73922+#endif
73923+
73924 /* scale priority and nice values from timeslices to -20..20 */
73925 /* to make it look like a "normal" Unix priority/nice value */
73926 priority = task_prio(task);
73927@@ -465,9 +516,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
73928 seq_put_decimal_ull(m, ' ', vsize);
73929 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
73930 seq_put_decimal_ull(m, ' ', rsslim);
73931+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73932+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
73933+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
73934+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
73935+#else
73936 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
73937 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
73938 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
73939+#endif
73940 seq_put_decimal_ull(m, ' ', esp);
73941 seq_put_decimal_ull(m, ' ', eip);
73942 /* The signal information here is obsolete.
73943@@ -489,7 +546,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
73944 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
73945 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
73946
73947- if (mm && permitted) {
73948+ if (mm && permitted
73949+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73950+ && !PAX_RAND_FLAGS(mm)
73951+#endif
73952+ ) {
73953 seq_put_decimal_ull(m, ' ', mm->start_data);
73954 seq_put_decimal_ull(m, ' ', mm->end_data);
73955 seq_put_decimal_ull(m, ' ', mm->start_brk);
73956@@ -527,8 +588,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
73957 struct pid *pid, struct task_struct *task)
73958 {
73959 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
73960- struct mm_struct *mm = get_task_mm(task);
73961+ struct mm_struct *mm;
73962
73963+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
73964+ if (current->exec_id != m->exec_id) {
73965+ gr_log_badprocpid("statm");
73966+ return 0;
73967+ }
73968+#endif
73969+ mm = get_task_mm(task);
73970 if (mm) {
73971 size = task_statm(mm, &shared, &text, &data, &resident);
73972 mmput(mm);
73973@@ -551,6 +619,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
73974 return 0;
73975 }
73976
73977+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
73978+int proc_pid_ipaddr(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
73979+{
73980+ unsigned long flags;
73981+ u32 curr_ip = 0;
73982+
73983+ if (lock_task_sighand(task, &flags)) {
73984+ curr_ip = task->signal->curr_ip;
73985+ unlock_task_sighand(task, &flags);
73986+ }
73987+ return seq_printf(m, "%pI4\n", &curr_ip);
73988+}
73989+#endif
73990+
73991 #ifdef CONFIG_CHECKPOINT_RESTORE
73992 static struct pid *
73993 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
73994diff --git a/fs/proc/base.c b/fs/proc/base.c
73995index 3f3d7ae..68de109 100644
73996--- a/fs/proc/base.c
73997+++ b/fs/proc/base.c
73998@@ -113,6 +113,14 @@ struct pid_entry {
73999 union proc_op op;
74000 };
74001
74002+struct getdents_callback {
74003+ struct linux_dirent __user * current_dir;
74004+ struct linux_dirent __user * previous;
74005+ struct file * file;
74006+ int count;
74007+ int error;
74008+};
74009+
74010 #define NOD(NAME, MODE, IOP, FOP, OP) { \
74011 .name = (NAME), \
74012 .len = sizeof(NAME) - 1, \
74013@@ -208,12 +216,28 @@ static int proc_pid_cmdline(struct seq_file *m, struct pid_namespace *ns,
74014 return 0;
74015 }
74016
74017+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
74018+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
74019+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
74020+ _mm->pax_flags & MF_PAX_SEGMEXEC))
74021+#endif
74022+
74023 static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
74024 struct pid *pid, struct task_struct *task)
74025 {
74026 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
74027 if (mm && !IS_ERR(mm)) {
74028 unsigned int nwords = 0;
74029+
74030+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
74031+ /* allow if we're currently ptracing this task */
74032+ if (PAX_RAND_FLAGS(mm) &&
74033+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
74034+ mmput(mm);
74035+ return 0;
74036+ }
74037+#endif
74038+
74039 do {
74040 nwords += 2;
74041 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
74042@@ -225,7 +249,7 @@ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
74043 }
74044
74045
74046-#ifdef CONFIG_KALLSYMS
74047+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
74048 /*
74049 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
74050 * Returns the resolved symbol. If that fails, simply return the address.
74051@@ -265,7 +289,7 @@ static void unlock_trace(struct task_struct *task)
74052 mutex_unlock(&task->signal->cred_guard_mutex);
74053 }
74054
74055-#ifdef CONFIG_STACKTRACE
74056+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
74057
74058 #define MAX_STACK_TRACE_DEPTH 64
74059
74060@@ -456,7 +480,7 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
74061 return 0;
74062 }
74063
74064-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
74065+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
74066 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
74067 struct pid *pid, struct task_struct *task)
74068 {
74069@@ -486,7 +510,7 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
74070 /************************************************************************/
74071
74072 /* permission checks */
74073-static int proc_fd_access_allowed(struct inode *inode)
74074+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
74075 {
74076 struct task_struct *task;
74077 int allowed = 0;
74078@@ -496,7 +520,10 @@ static int proc_fd_access_allowed(struct inode *inode)
74079 */
74080 task = get_proc_task(inode);
74081 if (task) {
74082- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
74083+ if (log)
74084+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
74085+ else
74086+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
74087 put_task_struct(task);
74088 }
74089 return allowed;
74090@@ -527,10 +554,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
74091 struct task_struct *task,
74092 int hide_pid_min)
74093 {
74094+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
74095+ return false;
74096+
74097+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74098+ rcu_read_lock();
74099+ {
74100+ const struct cred *tmpcred = current_cred();
74101+ const struct cred *cred = __task_cred(task);
74102+
74103+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
74104+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74105+ || in_group_p(grsec_proc_gid)
74106+#endif
74107+ ) {
74108+ rcu_read_unlock();
74109+ return true;
74110+ }
74111+ }
74112+ rcu_read_unlock();
74113+
74114+ if (!pid->hide_pid)
74115+ return false;
74116+#endif
74117+
74118 if (pid->hide_pid < hide_pid_min)
74119 return true;
74120 if (in_group_p(pid->pid_gid))
74121 return true;
74122+
74123 return ptrace_may_access(task, PTRACE_MODE_READ);
74124 }
74125
74126@@ -548,7 +600,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
74127 put_task_struct(task);
74128
74129 if (!has_perms) {
74130+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74131+ {
74132+#else
74133 if (pid->hide_pid == 2) {
74134+#endif
74135 /*
74136 * Let's make getdents(), stat(), and open()
74137 * consistent with each other. If a process
74138@@ -609,6 +665,10 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
74139
74140 if (task) {
74141 mm = mm_access(task, mode);
74142+ if (!IS_ERR_OR_NULL(mm) && gr_acl_handle_procpidmem(task)) {
74143+ mmput(mm);
74144+ mm = ERR_PTR(-EPERM);
74145+ }
74146 put_task_struct(task);
74147
74148 if (!IS_ERR_OR_NULL(mm)) {
74149@@ -630,6 +690,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
74150 return PTR_ERR(mm);
74151
74152 file->private_data = mm;
74153+
74154+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
74155+ file->f_version = current->exec_id;
74156+#endif
74157+
74158 return 0;
74159 }
74160
74161@@ -651,6 +716,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
74162 ssize_t copied;
74163 char *page;
74164
74165+#ifdef CONFIG_GRKERNSEC
74166+ if (write)
74167+ return -EPERM;
74168+#endif
74169+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
74170+ if (file->f_version != current->exec_id) {
74171+ gr_log_badprocpid("mem");
74172+ return 0;
74173+ }
74174+#endif
74175+
74176 if (!mm)
74177 return 0;
74178
74179@@ -663,7 +739,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
74180 goto free;
74181
74182 while (count > 0) {
74183- int this_len = min_t(int, count, PAGE_SIZE);
74184+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
74185
74186 if (write && copy_from_user(page, buf, this_len)) {
74187 copied = -EFAULT;
74188@@ -755,6 +831,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
74189 if (!mm)
74190 return 0;
74191
74192+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
74193+ if (file->f_version != current->exec_id) {
74194+ gr_log_badprocpid("environ");
74195+ return 0;
74196+ }
74197+#endif
74198+
74199 page = (char *)__get_free_page(GFP_TEMPORARY);
74200 if (!page)
74201 return -ENOMEM;
74202@@ -764,7 +847,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
74203 goto free;
74204 while (count > 0) {
74205 size_t this_len, max_len;
74206- int retval;
74207+ ssize_t retval;
74208
74209 if (src >= (mm->env_end - mm->env_start))
74210 break;
74211@@ -1378,7 +1461,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
74212 int error = -EACCES;
74213
74214 /* Are we allowed to snoop on the tasks file descriptors? */
74215- if (!proc_fd_access_allowed(inode))
74216+ if (!proc_fd_access_allowed(inode, 0))
74217 goto out;
74218
74219 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
74220@@ -1422,8 +1505,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
74221 struct path path;
74222
74223 /* Are we allowed to snoop on the tasks file descriptors? */
74224- if (!proc_fd_access_allowed(inode))
74225- goto out;
74226+ /* logging this is needed for learning on chromium to work properly,
74227+ but we don't want to flood the logs from 'ps' which does a readlink
74228+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
74229+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
74230+ */
74231+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
74232+ if (!proc_fd_access_allowed(inode,0))
74233+ goto out;
74234+ } else {
74235+ if (!proc_fd_access_allowed(inode,1))
74236+ goto out;
74237+ }
74238
74239 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
74240 if (error)
74241@@ -1473,7 +1566,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
74242 rcu_read_lock();
74243 cred = __task_cred(task);
74244 inode->i_uid = cred->euid;
74245+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74246+ inode->i_gid = grsec_proc_gid;
74247+#else
74248 inode->i_gid = cred->egid;
74249+#endif
74250 rcu_read_unlock();
74251 }
74252 security_task_to_inode(task, inode);
74253@@ -1509,10 +1606,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
74254 return -ENOENT;
74255 }
74256 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
74257+#ifdef CONFIG_GRKERNSEC_PROC_USER
74258+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
74259+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74260+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
74261+#endif
74262 task_dumpable(task)) {
74263 cred = __task_cred(task);
74264 stat->uid = cred->euid;
74265+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74266+ stat->gid = grsec_proc_gid;
74267+#else
74268 stat->gid = cred->egid;
74269+#endif
74270 }
74271 }
74272 rcu_read_unlock();
74273@@ -1550,11 +1656,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
74274
74275 if (task) {
74276 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
74277+#ifdef CONFIG_GRKERNSEC_PROC_USER
74278+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
74279+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74280+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
74281+#endif
74282 task_dumpable(task)) {
74283 rcu_read_lock();
74284 cred = __task_cred(task);
74285 inode->i_uid = cred->euid;
74286+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74287+ inode->i_gid = grsec_proc_gid;
74288+#else
74289 inode->i_gid = cred->egid;
74290+#endif
74291 rcu_read_unlock();
74292 } else {
74293 inode->i_uid = GLOBAL_ROOT_UID;
74294@@ -2085,6 +2200,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
74295 if (!task)
74296 goto out_no_task;
74297
74298+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
74299+ goto out;
74300+
74301 /*
74302 * Yes, it does not scale. And it should not. Don't add
74303 * new entries into /proc/<tgid>/ without very good reasons.
74304@@ -2115,6 +2233,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
74305 if (!task)
74306 return -ENOENT;
74307
74308+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
74309+ goto out;
74310+
74311 if (!dir_emit_dots(file, ctx))
74312 goto out;
74313
74314@@ -2557,7 +2678,7 @@ static const struct pid_entry tgid_base_stuff[] = {
74315 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
74316 #endif
74317 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
74318-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
74319+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
74320 ONE("syscall", S_IRUSR, proc_pid_syscall),
74321 #endif
74322 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
74323@@ -2582,10 +2703,10 @@ static const struct pid_entry tgid_base_stuff[] = {
74324 #ifdef CONFIG_SECURITY
74325 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
74326 #endif
74327-#ifdef CONFIG_KALLSYMS
74328+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
74329 ONE("wchan", S_IRUGO, proc_pid_wchan),
74330 #endif
74331-#ifdef CONFIG_STACKTRACE
74332+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
74333 ONE("stack", S_IRUSR, proc_pid_stack),
74334 #endif
74335 #ifdef CONFIG_SCHEDSTATS
74336@@ -2619,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
74337 #ifdef CONFIG_HARDWALL
74338 ONE("hardwall", S_IRUGO, proc_pid_hardwall),
74339 #endif
74340+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
74341+ ONE("ipaddr", S_IRUSR, proc_pid_ipaddr),
74342+#endif
74343 #ifdef CONFIG_USER_NS
74344 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
74345 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
74346@@ -2751,7 +2875,14 @@ static int proc_pid_instantiate(struct inode *dir,
74347 if (!inode)
74348 goto out;
74349
74350+#ifdef CONFIG_GRKERNSEC_PROC_USER
74351+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
74352+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74353+ inode->i_gid = grsec_proc_gid;
74354+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
74355+#else
74356 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
74357+#endif
74358 inode->i_op = &proc_tgid_base_inode_operations;
74359 inode->i_fop = &proc_tgid_base_operations;
74360 inode->i_flags|=S_IMMUTABLE;
74361@@ -2789,7 +2920,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
74362 if (!task)
74363 goto out;
74364
74365+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
74366+ goto out_put_task;
74367+
74368 result = proc_pid_instantiate(dir, dentry, task, NULL);
74369+out_put_task:
74370 put_task_struct(task);
74371 out:
74372 return ERR_PTR(result);
74373@@ -2903,7 +3038,7 @@ static const struct pid_entry tid_base_stuff[] = {
74374 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
74375 #endif
74376 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
74377-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
74378+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
74379 ONE("syscall", S_IRUSR, proc_pid_syscall),
74380 #endif
74381 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
74382@@ -2930,10 +3065,10 @@ static const struct pid_entry tid_base_stuff[] = {
74383 #ifdef CONFIG_SECURITY
74384 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
74385 #endif
74386-#ifdef CONFIG_KALLSYMS
74387+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
74388 ONE("wchan", S_IRUGO, proc_pid_wchan),
74389 #endif
74390-#ifdef CONFIG_STACKTRACE
74391+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
74392 ONE("stack", S_IRUSR, proc_pid_stack),
74393 #endif
74394 #ifdef CONFIG_SCHEDSTATS
74395diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
74396index cbd82df..c0407d2 100644
74397--- a/fs/proc/cmdline.c
74398+++ b/fs/proc/cmdline.c
74399@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
74400
74401 static int __init proc_cmdline_init(void)
74402 {
74403+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74404+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
74405+#else
74406 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
74407+#endif
74408 return 0;
74409 }
74410 fs_initcall(proc_cmdline_init);
74411diff --git a/fs/proc/devices.c b/fs/proc/devices.c
74412index 50493ed..248166b 100644
74413--- a/fs/proc/devices.c
74414+++ b/fs/proc/devices.c
74415@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
74416
74417 static int __init proc_devices_init(void)
74418 {
74419+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74420+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
74421+#else
74422 proc_create("devices", 0, NULL, &proc_devinfo_operations);
74423+#endif
74424 return 0;
74425 }
74426 fs_initcall(proc_devices_init);
74427diff --git a/fs/proc/fd.c b/fs/proc/fd.c
74428index 8e5ad83..1f07a8c 100644
74429--- a/fs/proc/fd.c
74430+++ b/fs/proc/fd.c
74431@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
74432 if (!task)
74433 return -ENOENT;
74434
74435- files = get_files_struct(task);
74436+ if (!gr_acl_handle_procpidmem(task))
74437+ files = get_files_struct(task);
74438 put_task_struct(task);
74439
74440 if (files) {
74441@@ -284,11 +285,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
74442 */
74443 int proc_fd_permission(struct inode *inode, int mask)
74444 {
74445+ struct task_struct *task;
74446 int rv = generic_permission(inode, mask);
74447- if (rv == 0)
74448- return 0;
74449+
74450 if (task_tgid(current) == proc_pid(inode))
74451 rv = 0;
74452+
74453+ task = get_proc_task(inode);
74454+ if (task == NULL)
74455+ return rv;
74456+
74457+ if (gr_acl_handle_procpidmem(task))
74458+ rv = -EACCES;
74459+
74460+ put_task_struct(task);
74461+
74462 return rv;
74463 }
74464
74465diff --git a/fs/proc/generic.c b/fs/proc/generic.c
74466index be65b20..2998ba8 100644
74467--- a/fs/proc/generic.c
74468+++ b/fs/proc/generic.c
74469@@ -22,6 +22,7 @@
74470 #include <linux/bitops.h>
74471 #include <linux/spinlock.h>
74472 #include <linux/completion.h>
74473+#include <linux/grsecurity.h>
74474 #include <asm/uaccess.h>
74475
74476 #include "internal.h"
74477@@ -253,6 +254,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
74478 return proc_lookup_de(PDE(dir), dir, dentry);
74479 }
74480
74481+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
74482+ unsigned int flags)
74483+{
74484+ if (gr_proc_is_restricted())
74485+ return ERR_PTR(-EACCES);
74486+
74487+ return proc_lookup_de(PDE(dir), dir, dentry);
74488+}
74489+
74490 /*
74491 * This returns non-zero if at EOF, so that the /proc
74492 * root directory can use this and check if it should
74493@@ -310,6 +320,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
74494 return proc_readdir_de(PDE(inode), file, ctx);
74495 }
74496
74497+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
74498+{
74499+ struct inode *inode = file_inode(file);
74500+
74501+ if (gr_proc_is_restricted())
74502+ return -EACCES;
74503+
74504+ return proc_readdir_de(PDE(inode), file, ctx);
74505+}
74506+
74507 /*
74508 * These are the generic /proc directory operations. They
74509 * use the in-memory "struct proc_dir_entry" tree to parse
74510@@ -321,6 +341,12 @@ static const struct file_operations proc_dir_operations = {
74511 .iterate = proc_readdir,
74512 };
74513
74514+static const struct file_operations proc_dir_restricted_operations = {
74515+ .llseek = generic_file_llseek,
74516+ .read = generic_read_dir,
74517+ .iterate = proc_readdir_restrict,
74518+};
74519+
74520 /*
74521 * proc directories can do almost nothing..
74522 */
74523@@ -330,6 +356,12 @@ static const struct inode_operations proc_dir_inode_operations = {
74524 .setattr = proc_notify_change,
74525 };
74526
74527+static const struct inode_operations proc_dir_restricted_inode_operations = {
74528+ .lookup = proc_lookup_restrict,
74529+ .getattr = proc_getattr,
74530+ .setattr = proc_notify_change,
74531+};
74532+
74533 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
74534 {
74535 int ret;
74536@@ -441,6 +473,31 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
74537 }
74538 EXPORT_SYMBOL_GPL(proc_mkdir_data);
74539
74540+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
74541+ struct proc_dir_entry *parent, void *data)
74542+{
74543+ struct proc_dir_entry *ent;
74544+
74545+ if (mode == 0)
74546+ mode = S_IRUGO | S_IXUGO;
74547+
74548+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
74549+ if (ent) {
74550+ ent->data = data;
74551+ ent->restricted = 1;
74552+ ent->proc_fops = &proc_dir_restricted_operations;
74553+ ent->proc_iops = &proc_dir_restricted_inode_operations;
74554+ parent->nlink++;
74555+ if (proc_register(parent, ent) < 0) {
74556+ kfree(ent);
74557+ parent->nlink--;
74558+ ent = NULL;
74559+ }
74560+ }
74561+ return ent;
74562+}
74563+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
74564+
74565 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
74566 struct proc_dir_entry *parent)
74567 {
74568@@ -455,6 +512,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
74569 }
74570 EXPORT_SYMBOL(proc_mkdir);
74571
74572+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
74573+ struct proc_dir_entry *parent)
74574+{
74575+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
74576+}
74577+EXPORT_SYMBOL(proc_mkdir_restrict);
74578+
74579 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
74580 struct proc_dir_entry *parent,
74581 const struct file_operations *proc_fops,
74582diff --git a/fs/proc/inode.c b/fs/proc/inode.c
74583index 7697b66..8d8e541 100644
74584--- a/fs/proc/inode.c
74585+++ b/fs/proc/inode.c
74586@@ -24,11 +24,17 @@
74587 #include <linux/mount.h>
74588 #include <linux/magic.h>
74589 #include <linux/namei.h>
74590+#include <linux/grsecurity.h>
74591
74592 #include <asm/uaccess.h>
74593
74594 #include "internal.h"
74595
74596+#ifdef CONFIG_PROC_SYSCTL
74597+extern const struct inode_operations proc_sys_inode_operations;
74598+extern const struct inode_operations proc_sys_dir_operations;
74599+#endif
74600+
74601 static void proc_evict_inode(struct inode *inode)
74602 {
74603 struct proc_dir_entry *de;
74604@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
74605 RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
74606 sysctl_head_put(head);
74607 }
74608+
74609+#ifdef CONFIG_PROC_SYSCTL
74610+ if (inode->i_op == &proc_sys_inode_operations ||
74611+ inode->i_op == &proc_sys_dir_operations)
74612+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
74613+#endif
74614+
74615 }
74616
74617 static struct kmem_cache * proc_inode_cachep;
74618@@ -426,7 +439,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
74619 if (de->mode) {
74620 inode->i_mode = de->mode;
74621 inode->i_uid = de->uid;
74622+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74623+ inode->i_gid = grsec_proc_gid;
74624+#else
74625 inode->i_gid = de->gid;
74626+#endif
74627 }
74628 if (de->size)
74629 inode->i_size = de->size;
74630diff --git a/fs/proc/internal.h b/fs/proc/internal.h
74631index c835b94..c9e01a3 100644
74632--- a/fs/proc/internal.h
74633+++ b/fs/proc/internal.h
74634@@ -47,9 +47,10 @@ struct proc_dir_entry {
74635 struct completion *pde_unload_completion;
74636 struct list_head pde_openers; /* who did ->open, but not ->release */
74637 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
74638+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
74639 u8 namelen;
74640 char name[];
74641-};
74642+} __randomize_layout;
74643
74644 union proc_op {
74645 int (*proc_get_link)(struct dentry *, struct path *);
74646@@ -67,7 +68,7 @@ struct proc_inode {
74647 struct ctl_table *sysctl_entry;
74648 const struct proc_ns_operations *ns_ops;
74649 struct inode vfs_inode;
74650-};
74651+} __randomize_layout;
74652
74653 /*
74654 * General functions
74655@@ -155,6 +156,10 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
74656 struct pid *, struct task_struct *);
74657 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
74658 struct pid *, struct task_struct *);
74659+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
74660+extern int proc_pid_ipaddr(struct seq_file *, struct pid_namespace *,
74661+ struct pid *, struct task_struct *);
74662+#endif
74663
74664 /*
74665 * base.c
74666@@ -179,9 +184,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
74667 * generic.c
74668 */
74669 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
74670+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
74671 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
74672 struct dentry *);
74673 extern int proc_readdir(struct file *, struct dir_context *);
74674+extern int proc_readdir_restrict(struct file *, struct dir_context *);
74675 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
74676
74677 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
74678diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
74679index a352d57..cb94a5c 100644
74680--- a/fs/proc/interrupts.c
74681+++ b/fs/proc/interrupts.c
74682@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
74683
74684 static int __init proc_interrupts_init(void)
74685 {
74686+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74687+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
74688+#else
74689 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
74690+#endif
74691 return 0;
74692 }
74693 fs_initcall(proc_interrupts_init);
74694diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
74695index 91a4e64..14bf8fa 100644
74696--- a/fs/proc/kcore.c
74697+++ b/fs/proc/kcore.c
74698@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
74699 * the addresses in the elf_phdr on our list.
74700 */
74701 start = kc_offset_to_vaddr(*fpos - elf_buflen);
74702- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
74703+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
74704+ if (tsz > buflen)
74705 tsz = buflen;
74706-
74707+
74708 while (buflen) {
74709 struct kcore_list *m;
74710
74711@@ -515,19 +516,20 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
74712 } else {
74713 if (kern_addr_valid(start)) {
74714 unsigned long n;
74715+ char *elf_buf;
74716+ mm_segment_t oldfs;
74717
74718- n = copy_to_user(buffer, (char *)start, tsz);
74719- /*
74720- * We cannot distinguish between fault on source
74721- * and fault on destination. When this happens
74722- * we clear too and hope it will trigger the
74723- * EFAULT again.
74724- */
74725- if (n) {
74726- if (clear_user(buffer + tsz - n,
74727- n))
74728- return -EFAULT;
74729- }
74730+ elf_buf = kzalloc(tsz, GFP_KERNEL);
74731+ if (!elf_buf)
74732+ return -ENOMEM;
74733+ oldfs = get_fs();
74734+ set_fs(KERNEL_DS);
74735+ n = __copy_from_user(elf_buf, (const void __user *)start, tsz);
74736+ set_fs(oldfs);
74737+ n = copy_to_user(buffer, elf_buf, tsz);
74738+ kfree(elf_buf);
74739+ if (n)
74740+ return -EFAULT;
74741 } else {
74742 if (clear_user(buffer, tsz))
74743 return -EFAULT;
74744@@ -547,6 +549,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
74745
74746 static int open_kcore(struct inode *inode, struct file *filp)
74747 {
74748+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
74749+ return -EPERM;
74750+#endif
74751 if (!capable(CAP_SYS_RAWIO))
74752 return -EPERM;
74753 if (kcore_need_update)
74754@@ -580,7 +585,7 @@ static int __meminit kcore_callback(struct notifier_block *self,
74755 return NOTIFY_OK;
74756 }
74757
74758-static struct notifier_block kcore_callback_nb __meminitdata = {
74759+static struct notifier_block kcore_callback_nb __meminitconst = {
74760 .notifier_call = kcore_callback,
74761 .priority = 0,
74762 };
74763diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
74764index d3ebf2e..6ad42d1 100644
74765--- a/fs/proc/meminfo.c
74766+++ b/fs/proc/meminfo.c
74767@@ -194,7 +194,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
74768 vmi.used >> 10,
74769 vmi.largest_chunk >> 10
74770 #ifdef CONFIG_MEMORY_FAILURE
74771- , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
74772+ , atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
74773 #endif
74774 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
74775 , K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
74776diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
74777index d4a3574..b421ce9 100644
74778--- a/fs/proc/nommu.c
74779+++ b/fs/proc/nommu.c
74780@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
74781
74782 if (file) {
74783 seq_pad(m, ' ');
74784- seq_path(m, &file->f_path, "");
74785+ seq_path(m, &file->f_path, "\n\\");
74786 }
74787
74788 seq_putc(m, '\n');
74789diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
74790index 1bde894..22ac7eb 100644
74791--- a/fs/proc/proc_net.c
74792+++ b/fs/proc/proc_net.c
74793@@ -23,9 +23,27 @@
74794 #include <linux/nsproxy.h>
74795 #include <net/net_namespace.h>
74796 #include <linux/seq_file.h>
74797+#include <linux/grsecurity.h>
74798
74799 #include "internal.h"
74800
74801+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
74802+static struct seq_operations *ipv6_seq_ops_addr;
74803+
74804+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
74805+{
74806+ ipv6_seq_ops_addr = addr;
74807+}
74808+
74809+void unregister_ipv6_seq_ops_addr(void)
74810+{
74811+ ipv6_seq_ops_addr = NULL;
74812+}
74813+
74814+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
74815+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
74816+#endif
74817+
74818 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
74819 {
74820 return pde->parent->data;
74821@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
74822 return maybe_get_net(PDE_NET(PDE(inode)));
74823 }
74824
74825+extern const struct seq_operations dev_seq_ops;
74826+
74827 int seq_open_net(struct inode *ino, struct file *f,
74828 const struct seq_operations *ops, int size)
74829 {
74830@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
74831
74832 BUG_ON(size < sizeof(*p));
74833
74834+ /* only permit access to /proc/net/dev */
74835+ if (
74836+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
74837+ ops != ipv6_seq_ops_addr &&
74838+#endif
74839+ ops != &dev_seq_ops && gr_proc_is_restricted())
74840+ return -EACCES;
74841+
74842 net = get_proc_net(ino);
74843 if (net == NULL)
74844 return -ENXIO;
74845@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
74846 int err;
74847 struct net *net;
74848
74849+ if (gr_proc_is_restricted())
74850+ return -EACCES;
74851+
74852 err = -ENXIO;
74853 net = get_proc_net(inode);
74854 if (net == NULL)
74855diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
74856index f92d5dd..26398ac 100644
74857--- a/fs/proc/proc_sysctl.c
74858+++ b/fs/proc/proc_sysctl.c
74859@@ -11,13 +11,21 @@
74860 #include <linux/namei.h>
74861 #include <linux/mm.h>
74862 #include <linux/module.h>
74863+#include <linux/nsproxy.h>
74864+#ifdef CONFIG_GRKERNSEC
74865+#include <net/net_namespace.h>
74866+#endif
74867 #include "internal.h"
74868
74869+extern int gr_handle_chroot_sysctl(const int op);
74870+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
74871+ const int op);
74872+
74873 static const struct dentry_operations proc_sys_dentry_operations;
74874 static const struct file_operations proc_sys_file_operations;
74875-static const struct inode_operations proc_sys_inode_operations;
74876+const struct inode_operations proc_sys_inode_operations;
74877 static const struct file_operations proc_sys_dir_file_operations;
74878-static const struct inode_operations proc_sys_dir_operations;
74879+const struct inode_operations proc_sys_dir_operations;
74880
74881 void proc_sys_poll_notify(struct ctl_table_poll *poll)
74882 {
74883@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
74884
74885 err = NULL;
74886 d_set_d_op(dentry, &proc_sys_dentry_operations);
74887+
74888+ gr_handle_proc_create(dentry, inode);
74889+
74890 d_add(dentry, inode);
74891
74892 out:
74893@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
74894 struct inode *inode = file_inode(filp);
74895 struct ctl_table_header *head = grab_header(inode);
74896 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
74897+ int op = write ? MAY_WRITE : MAY_READ;
74898 ssize_t error;
74899 size_t res;
74900
74901@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
74902 * and won't be until we finish.
74903 */
74904 error = -EPERM;
74905- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
74906+ if (sysctl_perm(head, table, op))
74907 goto out;
74908
74909 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
74910@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
74911 if (!table->proc_handler)
74912 goto out;
74913
74914+#ifdef CONFIG_GRKERNSEC
74915+ error = -EPERM;
74916+ if (gr_handle_chroot_sysctl(op))
74917+ goto out;
74918+ dget(filp->f_path.dentry);
74919+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
74920+ dput(filp->f_path.dentry);
74921+ goto out;
74922+ }
74923+ dput(filp->f_path.dentry);
74924+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
74925+ goto out;
74926+ if (write) {
74927+ if (current->nsproxy->net_ns != table->extra2) {
74928+ if (!capable(CAP_SYS_ADMIN))
74929+ goto out;
74930+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
74931+ goto out;
74932+ }
74933+#endif
74934+
74935 /* careful: calling conventions are nasty here */
74936 res = count;
74937 error = table->proc_handler(table, write, buf, &res, ppos);
74938@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
74939 return false;
74940 } else {
74941 d_set_d_op(child, &proc_sys_dentry_operations);
74942+
74943+ gr_handle_proc_create(child, inode);
74944+
74945 d_add(child, inode);
74946 }
74947 } else {
74948@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, struct ctl_table *table,
74949 if ((*pos)++ < ctx->pos)
74950 return true;
74951
74952+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
74953+ return 0;
74954+
74955 if (unlikely(S_ISLNK(table->mode)))
74956 res = proc_sys_link_fill_cache(file, ctx, head, table);
74957 else
74958@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
74959 if (IS_ERR(head))
74960 return PTR_ERR(head);
74961
74962+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
74963+ return -ENOENT;
74964+
74965 generic_fillattr(inode, stat);
74966 if (table)
74967 stat->mode = (stat->mode & S_IFMT) | table->mode;
74968@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
74969 .llseek = generic_file_llseek,
74970 };
74971
74972-static const struct inode_operations proc_sys_inode_operations = {
74973+const struct inode_operations proc_sys_inode_operations = {
74974 .permission = proc_sys_permission,
74975 .setattr = proc_sys_setattr,
74976 .getattr = proc_sys_getattr,
74977 };
74978
74979-static const struct inode_operations proc_sys_dir_operations = {
74980+const struct inode_operations proc_sys_dir_operations = {
74981 .lookup = proc_sys_lookup,
74982 .permission = proc_sys_permission,
74983 .setattr = proc_sys_setattr,
74984@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
74985 static struct ctl_dir *new_dir(struct ctl_table_set *set,
74986 const char *name, int namelen)
74987 {
74988- struct ctl_table *table;
74989+ ctl_table_no_const *table;
74990 struct ctl_dir *new;
74991 struct ctl_node *node;
74992 char *new_name;
74993@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
74994 return NULL;
74995
74996 node = (struct ctl_node *)(new + 1);
74997- table = (struct ctl_table *)(node + 1);
74998+ table = (ctl_table_no_const *)(node + 1);
74999 new_name = (char *)(table + 2);
75000 memcpy(new_name, name, namelen);
75001 new_name[namelen] = '\0';
75002@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
75003 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
75004 struct ctl_table_root *link_root)
75005 {
75006- struct ctl_table *link_table, *entry, *link;
75007+ ctl_table_no_const *link_table, *link;
75008+ struct ctl_table *entry;
75009 struct ctl_table_header *links;
75010 struct ctl_node *node;
75011 char *link_name;
75012@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
75013 return NULL;
75014
75015 node = (struct ctl_node *)(links + 1);
75016- link_table = (struct ctl_table *)(node + nr_entries);
75017+ link_table = (ctl_table_no_const *)(node + nr_entries);
75018 link_name = (char *)&link_table[nr_entries + 1];
75019
75020 for (link = link_table, entry = table; entry->procname; link++, entry++) {
75021@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
75022 struct ctl_table_header ***subheader, struct ctl_table_set *set,
75023 struct ctl_table *table)
75024 {
75025- struct ctl_table *ctl_table_arg = NULL;
75026- struct ctl_table *entry, *files;
75027+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
75028+ struct ctl_table *entry;
75029 int nr_files = 0;
75030 int nr_dirs = 0;
75031 int err = -ENOMEM;
75032@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
75033 nr_files++;
75034 }
75035
75036- files = table;
75037 /* If there are mixed files and directories we need a new table */
75038 if (nr_dirs && nr_files) {
75039- struct ctl_table *new;
75040+ ctl_table_no_const *new;
75041 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
75042 GFP_KERNEL);
75043 if (!files)
75044@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
75045 /* Register everything except a directory full of subdirectories */
75046 if (nr_files || !nr_dirs) {
75047 struct ctl_table_header *header;
75048- header = __register_sysctl_table(set, path, files);
75049+ header = __register_sysctl_table(set, path, files ? files : table);
75050 if (!header) {
75051 kfree(ctl_table_arg);
75052 goto out;
75053diff --git a/fs/proc/root.c b/fs/proc/root.c
75054index e74ac9f..35e89f4 100644
75055--- a/fs/proc/root.c
75056+++ b/fs/proc/root.c
75057@@ -188,7 +188,15 @@ void __init proc_root_init(void)
75058 proc_mkdir("openprom", NULL);
75059 #endif
75060 proc_tty_init();
75061+#ifdef CONFIG_GRKERNSEC_PROC_ADD
75062+#ifdef CONFIG_GRKERNSEC_PROC_USER
75063+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
75064+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
75065+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
75066+#endif
75067+#else
75068 proc_mkdir("bus", NULL);
75069+#endif
75070 proc_sys_init();
75071 }
75072
75073diff --git a/fs/proc/stat.c b/fs/proc/stat.c
75074index 510413eb..34d9a8c 100644
75075--- a/fs/proc/stat.c
75076+++ b/fs/proc/stat.c
75077@@ -11,6 +11,7 @@
75078 #include <linux/irqnr.h>
75079 #include <linux/cputime.h>
75080 #include <linux/tick.h>
75081+#include <linux/grsecurity.h>
75082
75083 #ifndef arch_irq_stat_cpu
75084 #define arch_irq_stat_cpu(cpu) 0
75085@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
75086 u64 sum_softirq = 0;
75087 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
75088 struct timespec boottime;
75089+ int unrestricted = 1;
75090+
75091+#ifdef CONFIG_GRKERNSEC_PROC_ADD
75092+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
75093+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
75094+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
75095+ && !in_group_p(grsec_proc_gid)
75096+#endif
75097+ )
75098+ unrestricted = 0;
75099+#endif
75100+#endif
75101
75102 user = nice = system = idle = iowait =
75103 irq = softirq = steal = 0;
75104@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
75105 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
75106 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
75107 idle += get_idle_time(i);
75108- iowait += get_iowait_time(i);
75109- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
75110- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
75111- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
75112- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
75113- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
75114- sum += kstat_cpu_irqs_sum(i);
75115- sum += arch_irq_stat_cpu(i);
75116+ if (unrestricted) {
75117+ iowait += get_iowait_time(i);
75118+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
75119+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
75120+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
75121+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
75122+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
75123+ sum += kstat_cpu_irqs_sum(i);
75124+ sum += arch_irq_stat_cpu(i);
75125+ for (j = 0; j < NR_SOFTIRQS; j++) {
75126+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
75127
75128- for (j = 0; j < NR_SOFTIRQS; j++) {
75129- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
75130-
75131- per_softirq_sums[j] += softirq_stat;
75132- sum_softirq += softirq_stat;
75133+ per_softirq_sums[j] += softirq_stat;
75134+ sum_softirq += softirq_stat;
75135+ }
75136 }
75137 }
75138- sum += arch_irq_stat();
75139+ if (unrestricted)
75140+ sum += arch_irq_stat();
75141
75142 seq_puts(p, "cpu ");
75143 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
75144@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
75145 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
75146 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
75147 idle = get_idle_time(i);
75148- iowait = get_iowait_time(i);
75149- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
75150- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
75151- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
75152- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
75153- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
75154+ if (unrestricted) {
75155+ iowait = get_iowait_time(i);
75156+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
75157+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
75158+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
75159+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
75160+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
75161+ }
75162 seq_printf(p, "cpu%d", i);
75163 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
75164 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
75165@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
75166
75167 /* sum again ? it could be updated? */
75168 for_each_irq_nr(j)
75169- seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
75170+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs_usr(j) : 0ULL);
75171
75172 seq_printf(p,
75173 "\nctxt %llu\n"
75174@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
75175 "processes %lu\n"
75176 "procs_running %lu\n"
75177 "procs_blocked %lu\n",
75178- nr_context_switches(),
75179+ unrestricted ? nr_context_switches() : 0ULL,
75180 (unsigned long)jif,
75181- total_forks,
75182- nr_running(),
75183- nr_iowait());
75184+ unrestricted ? total_forks : 0UL,
75185+ unrestricted ? nr_running() : 0UL,
75186+ unrestricted ? nr_iowait() : 0UL);
75187
75188 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
75189
75190diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
75191index 6dee68d..1b4add0 100644
75192--- a/fs/proc/task_mmu.c
75193+++ b/fs/proc/task_mmu.c
75194@@ -13,12 +13,19 @@
75195 #include <linux/swap.h>
75196 #include <linux/swapops.h>
75197 #include <linux/mmu_notifier.h>
75198+#include <linux/grsecurity.h>
75199
75200 #include <asm/elf.h>
75201 #include <asm/uaccess.h>
75202 #include <asm/tlbflush.h>
75203 #include "internal.h"
75204
75205+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75206+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
75207+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
75208+ _mm->pax_flags & MF_PAX_SEGMEXEC))
75209+#endif
75210+
75211 void task_mem(struct seq_file *m, struct mm_struct *mm)
75212 {
75213 unsigned long data, text, lib, swap, ptes, pmds;
75214@@ -57,8 +64,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
75215 "VmLib:\t%8lu kB\n"
75216 "VmPTE:\t%8lu kB\n"
75217 "VmPMD:\t%8lu kB\n"
75218- "VmSwap:\t%8lu kB\n",
75219- hiwater_vm << (PAGE_SHIFT-10),
75220+ "VmSwap:\t%8lu kB\n"
75221+
75222+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
75223+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
75224+#endif
75225+
75226+ ,hiwater_vm << (PAGE_SHIFT-10),
75227 total_vm << (PAGE_SHIFT-10),
75228 mm->locked_vm << (PAGE_SHIFT-10),
75229 mm->pinned_vm << (PAGE_SHIFT-10),
75230@@ -68,7 +80,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
75231 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
75232 ptes >> 10,
75233 pmds >> 10,
75234- swap << (PAGE_SHIFT-10));
75235+ swap << (PAGE_SHIFT-10)
75236+
75237+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
75238+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75239+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
75240+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
75241+#else
75242+ , mm->context.user_cs_base
75243+ , mm->context.user_cs_limit
75244+#endif
75245+#endif
75246+
75247+ );
75248 }
75249
75250 unsigned long task_vsize(struct mm_struct *mm)
75251@@ -285,13 +309,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
75252 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
75253 }
75254
75255- /* We don't show the stack guard page in /proc/maps */
75256+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75257+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
75258+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
75259+#else
75260 start = vma->vm_start;
75261- if (stack_guard_page_start(vma, start))
75262- start += PAGE_SIZE;
75263 end = vma->vm_end;
75264- if (stack_guard_page_end(vma, end))
75265- end -= PAGE_SIZE;
75266+#endif
75267
75268 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
75269 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
75270@@ -301,7 +325,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
75271 flags & VM_WRITE ? 'w' : '-',
75272 flags & VM_EXEC ? 'x' : '-',
75273 flags & VM_MAYSHARE ? 's' : 'p',
75274+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75275+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
75276+#else
75277 pgoff,
75278+#endif
75279 MAJOR(dev), MINOR(dev), ino);
75280
75281 /*
75282@@ -310,7 +338,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
75283 */
75284 if (file) {
75285 seq_pad(m, ' ');
75286- seq_path(m, &file->f_path, "\n");
75287+ seq_path(m, &file->f_path, "\n\\");
75288 goto done;
75289 }
75290
75291@@ -341,8 +369,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
75292 * Thread stack in /proc/PID/task/TID/maps or
75293 * the main process stack.
75294 */
75295- if (!is_pid || (vma->vm_start <= mm->start_stack &&
75296- vma->vm_end >= mm->start_stack)) {
75297+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
75298+ (vma->vm_start <= mm->start_stack &&
75299+ vma->vm_end >= mm->start_stack)) {
75300 name = "[stack]";
75301 } else {
75302 /* Thread stack in /proc/PID/maps */
75303@@ -362,6 +391,12 @@ done:
75304
75305 static int show_map(struct seq_file *m, void *v, int is_pid)
75306 {
75307+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75308+ if (current->exec_id != m->exec_id) {
75309+ gr_log_badprocpid("maps");
75310+ return 0;
75311+ }
75312+#endif
75313 show_map_vma(m, v, is_pid);
75314 m_cache_vma(m, v);
75315 return 0;
75316@@ -620,9 +655,18 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
75317 .private = &mss,
75318 };
75319
75320+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75321+ if (current->exec_id != m->exec_id) {
75322+ gr_log_badprocpid("smaps");
75323+ return 0;
75324+ }
75325+#endif
75326 memset(&mss, 0, sizeof mss);
75327- /* mmap_sem is held in m_start */
75328- walk_page_vma(vma, &smaps_walk);
75329+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75330+ if (!PAX_RAND_FLAGS(vma->vm_mm))
75331+#endif
75332+ /* mmap_sem is held in m_start */
75333+ walk_page_vma(vma, &smaps_walk);
75334
75335 show_map_vma(m, vma, is_pid);
75336
75337@@ -641,7 +685,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
75338 "KernelPageSize: %8lu kB\n"
75339 "MMUPageSize: %8lu kB\n"
75340 "Locked: %8lu kB\n",
75341+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75342+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
75343+#else
75344 (vma->vm_end - vma->vm_start) >> 10,
75345+#endif
75346 mss.resident >> 10,
75347 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
75348 mss.shared_clean >> 10,
75349@@ -1491,6 +1539,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
75350 char buffer[64];
75351 int nid;
75352
75353+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75354+ if (current->exec_id != m->exec_id) {
75355+ gr_log_badprocpid("numa_maps");
75356+ return 0;
75357+ }
75358+#endif
75359+
75360 if (!mm)
75361 return 0;
75362
75363@@ -1505,11 +1560,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
75364 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
75365 }
75366
75367+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75368+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
75369+#else
75370 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
75371+#endif
75372
75373 if (file) {
75374 seq_puts(m, " file=");
75375- seq_path(m, &file->f_path, "\n\t= ");
75376+ seq_path(m, &file->f_path, "\n\t\\= ");
75377 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
75378 seq_puts(m, " heap");
75379 } else {
75380diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
75381index 599ec2e..f1413ae 100644
75382--- a/fs/proc/task_nommu.c
75383+++ b/fs/proc/task_nommu.c
75384@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
75385 else
75386 bytes += kobjsize(mm);
75387
75388- if (current->fs && current->fs->users > 1)
75389+ if (current->fs && atomic_read(&current->fs->users) > 1)
75390 sbytes += kobjsize(current->fs);
75391 else
75392 bytes += kobjsize(current->fs);
75393@@ -180,7 +180,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
75394
75395 if (file) {
75396 seq_pad(m, ' ');
75397- seq_path(m, &file->f_path, "");
75398+ seq_path(m, &file->f_path, "\n\\");
75399 } else if (mm) {
75400 pid_t tid = pid_of_stack(priv, vma, is_pid);
75401
75402diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
75403index 4e61388..1a2523d 100644
75404--- a/fs/proc/vmcore.c
75405+++ b/fs/proc/vmcore.c
75406@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
75407 nr_bytes = count;
75408
75409 /* If pfn is not ram, return zeros for sparse dump files */
75410- if (pfn_is_ram(pfn) == 0)
75411- memset(buf, 0, nr_bytes);
75412- else {
75413+ if (pfn_is_ram(pfn) == 0) {
75414+ if (userbuf) {
75415+ if (clear_user((char __force_user *)buf, nr_bytes))
75416+ return -EFAULT;
75417+ } else
75418+ memset(buf, 0, nr_bytes);
75419+ } else {
75420 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
75421 offset, userbuf);
75422 if (tmp < 0)
75423@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
75424 static int copy_to(void *target, void *src, size_t size, int userbuf)
75425 {
75426 if (userbuf) {
75427- if (copy_to_user((char __user *) target, src, size))
75428+ if (copy_to_user((char __force_user *) target, src, size))
75429 return -EFAULT;
75430 } else {
75431 memcpy(target, src, size);
75432@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
75433 if (*fpos < m->offset + m->size) {
75434 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
75435 start = m->paddr + *fpos - m->offset;
75436- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
75437+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
75438 if (tmp < 0)
75439 return tmp;
75440 buflen -= tsz;
75441@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
75442 static ssize_t read_vmcore(struct file *file, char __user *buffer,
75443 size_t buflen, loff_t *fpos)
75444 {
75445- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
75446+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
75447 }
75448
75449 /*
75450diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
75451index d3fb2b6..43a8140 100644
75452--- a/fs/qnx6/qnx6.h
75453+++ b/fs/qnx6/qnx6.h
75454@@ -74,7 +74,7 @@ enum {
75455 BYTESEX_BE,
75456 };
75457
75458-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
75459+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
75460 {
75461 if (sbi->s_bytesex == BYTESEX_LE)
75462 return le64_to_cpu((__force __le64)n);
75463@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
75464 return (__force __fs64)cpu_to_be64(n);
75465 }
75466
75467-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
75468+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
75469 {
75470 if (sbi->s_bytesex == BYTESEX_LE)
75471 return le32_to_cpu((__force __le32)n);
75472diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
75473index bb2869f..d34ada8 100644
75474--- a/fs/quota/netlink.c
75475+++ b/fs/quota/netlink.c
75476@@ -44,7 +44,7 @@ static struct genl_family quota_genl_family = {
75477 void quota_send_warning(struct kqid qid, dev_t dev,
75478 const char warntype)
75479 {
75480- static atomic_t seq;
75481+ static atomic_unchecked_t seq;
75482 struct sk_buff *skb;
75483 void *msg_head;
75484 int ret;
75485@@ -60,7 +60,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
75486 "VFS: Not enough memory to send quota warning.\n");
75487 return;
75488 }
75489- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
75490+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
75491 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
75492 if (!msg_head) {
75493 printk(KERN_ERR
75494diff --git a/fs/read_write.c b/fs/read_write.c
75495index 8e1b687..bad2eec 100644
75496--- a/fs/read_write.c
75497+++ b/fs/read_write.c
75498@@ -553,7 +553,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
75499
75500 old_fs = get_fs();
75501 set_fs(get_ds());
75502- p = (__force const char __user *)buf;
75503+ p = (const char __force_user *)buf;
75504 if (count > MAX_RW_COUNT)
75505 count = MAX_RW_COUNT;
75506 if (file->f_op->write)
75507diff --git a/fs/readdir.c b/fs/readdir.c
75508index ced6791..936687b 100644
75509--- a/fs/readdir.c
75510+++ b/fs/readdir.c
75511@@ -18,6 +18,7 @@
75512 #include <linux/security.h>
75513 #include <linux/syscalls.h>
75514 #include <linux/unistd.h>
75515+#include <linux/namei.h>
75516
75517 #include <asm/uaccess.h>
75518
75519@@ -71,6 +72,7 @@ struct old_linux_dirent {
75520 struct readdir_callback {
75521 struct dir_context ctx;
75522 struct old_linux_dirent __user * dirent;
75523+ struct file * file;
75524 int result;
75525 };
75526
75527@@ -89,6 +91,10 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
75528 buf->result = -EOVERFLOW;
75529 return -EOVERFLOW;
75530 }
75531+
75532+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
75533+ return 0;
75534+
75535 buf->result++;
75536 dirent = buf->dirent;
75537 if (!access_ok(VERIFY_WRITE, dirent,
75538@@ -120,6 +126,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
75539 if (!f.file)
75540 return -EBADF;
75541
75542+ buf.file = f.file;
75543 error = iterate_dir(f.file, &buf.ctx);
75544 if (buf.result)
75545 error = buf.result;
75546@@ -145,6 +152,7 @@ struct getdents_callback {
75547 struct dir_context ctx;
75548 struct linux_dirent __user * current_dir;
75549 struct linux_dirent __user * previous;
75550+ struct file * file;
75551 int count;
75552 int error;
75553 };
75554@@ -167,6 +175,10 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
75555 buf->error = -EOVERFLOW;
75556 return -EOVERFLOW;
75557 }
75558+
75559+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
75560+ return 0;
75561+
75562 dirent = buf->previous;
75563 if (dirent) {
75564 if (__put_user(offset, &dirent->d_off))
75565@@ -212,6 +224,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
75566 if (!f.file)
75567 return -EBADF;
75568
75569+ buf.file = f.file;
75570 error = iterate_dir(f.file, &buf.ctx);
75571 if (error >= 0)
75572 error = buf.error;
75573@@ -230,6 +243,7 @@ struct getdents_callback64 {
75574 struct dir_context ctx;
75575 struct linux_dirent64 __user * current_dir;
75576 struct linux_dirent64 __user * previous;
75577+ struct file *file;
75578 int count;
75579 int error;
75580 };
75581@@ -246,6 +260,10 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
75582 buf->error = -EINVAL; /* only used if we fail.. */
75583 if (reclen > buf->count)
75584 return -EINVAL;
75585+
75586+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
75587+ return 0;
75588+
75589 dirent = buf->previous;
75590 if (dirent) {
75591 if (__put_user(offset, &dirent->d_off))
75592@@ -293,6 +311,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
75593 if (!f.file)
75594 return -EBADF;
75595
75596+ buf.file = f.file;
75597 error = iterate_dir(f.file, &buf.ctx);
75598 if (error >= 0)
75599 error = buf.error;
75600diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
75601index 9c02d96..6562c10 100644
75602--- a/fs/reiserfs/do_balan.c
75603+++ b/fs/reiserfs/do_balan.c
75604@@ -1887,7 +1887,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
75605 return;
75606 }
75607
75608- atomic_inc(&fs_generation(tb->tb_sb));
75609+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
75610 do_balance_starts(tb);
75611
75612 /*
75613diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
75614index aca73dd..e3c558d 100644
75615--- a/fs/reiserfs/item_ops.c
75616+++ b/fs/reiserfs/item_ops.c
75617@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
75618 }
75619
75620 static struct item_operations errcatch_ops = {
75621- errcatch_bytes_number,
75622- errcatch_decrement_key,
75623- errcatch_is_left_mergeable,
75624- errcatch_print_item,
75625- errcatch_check_item,
75626+ .bytes_number = errcatch_bytes_number,
75627+ .decrement_key = errcatch_decrement_key,
75628+ .is_left_mergeable = errcatch_is_left_mergeable,
75629+ .print_item = errcatch_print_item,
75630+ .check_item = errcatch_check_item,
75631
75632- errcatch_create_vi,
75633- errcatch_check_left,
75634- errcatch_check_right,
75635- errcatch_part_size,
75636- errcatch_unit_num,
75637- errcatch_print_vi
75638+ .create_vi = errcatch_create_vi,
75639+ .check_left = errcatch_check_left,
75640+ .check_right = errcatch_check_right,
75641+ .part_size = errcatch_part_size,
75642+ .unit_num = errcatch_unit_num,
75643+ .print_vi = errcatch_print_vi
75644 };
75645
75646 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
75647diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
75648index 621b9f3..af527fd 100644
75649--- a/fs/reiserfs/procfs.c
75650+++ b/fs/reiserfs/procfs.c
75651@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
75652 "SMALL_TAILS " : "NO_TAILS ",
75653 replay_only(sb) ? "REPLAY_ONLY " : "",
75654 convert_reiserfs(sb) ? "CONV " : "",
75655- atomic_read(&r->s_generation_counter),
75656+ atomic_read_unchecked(&r->s_generation_counter),
75657 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
75658 SF(s_do_balance), SF(s_unneeded_left_neighbor),
75659 SF(s_good_search_by_key_reada), SF(s_bmaps),
75660diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
75661index bb79cdd..fcf49ef 100644
75662--- a/fs/reiserfs/reiserfs.h
75663+++ b/fs/reiserfs/reiserfs.h
75664@@ -580,7 +580,7 @@ struct reiserfs_sb_info {
75665 /* Comment? -Hans */
75666 wait_queue_head_t s_wait;
75667 /* increased by one every time the tree gets re-balanced */
75668- atomic_t s_generation_counter;
75669+ atomic_unchecked_t s_generation_counter;
75670
75671 /* File system properties. Currently holds on-disk FS format */
75672 unsigned long s_properties;
75673@@ -2301,7 +2301,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
75674 #define REISERFS_USER_MEM 1 /* user memory mode */
75675
75676 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
75677-#define get_generation(s) atomic_read (&fs_generation(s))
75678+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
75679 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
75680 #define __fs_changed(gen,s) (gen != get_generation (s))
75681 #define fs_changed(gen,s) \
75682diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
75683index 71fbbe3..eff29ba 100644
75684--- a/fs/reiserfs/super.c
75685+++ b/fs/reiserfs/super.c
75686@@ -1868,6 +1868,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
75687 sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
75688 sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
75689 sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
75690+#ifdef CONFIG_REISERFS_FS_XATTR
75691+ /* turn on user xattrs by default */
75692+ sbi->s_mount_opt |= (1 << REISERFS_XATTRS_USER);
75693+#endif
75694 /* no preallocation minimum, be smart in reiserfs_file_write instead */
75695 sbi->s_alloc_options.preallocmin = 0;
75696 /* Preallocate by 16 blocks (17-1) at once */
75697diff --git a/fs/select.c b/fs/select.c
75698index f684c75..4117611 100644
75699--- a/fs/select.c
75700+++ b/fs/select.c
75701@@ -20,6 +20,7 @@
75702 #include <linux/export.h>
75703 #include <linux/slab.h>
75704 #include <linux/poll.h>
75705+#include <linux/security.h>
75706 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
75707 #include <linux/file.h>
75708 #include <linux/fdtable.h>
75709@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
75710 struct poll_list *walk = head;
75711 unsigned long todo = nfds;
75712
75713+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
75714 if (nfds > rlimit(RLIMIT_NOFILE))
75715 return -EINVAL;
75716
75717diff --git a/fs/seq_file.c b/fs/seq_file.c
75718index 555f821..34684d7 100644
75719--- a/fs/seq_file.c
75720+++ b/fs/seq_file.c
75721@@ -12,6 +12,8 @@
75722 #include <linux/slab.h>
75723 #include <linux/cred.h>
75724 #include <linux/mm.h>
75725+#include <linux/sched.h>
75726+#include <linux/grsecurity.h>
75727
75728 #include <asm/uaccess.h>
75729 #include <asm/page.h>
75730@@ -23,16 +25,7 @@ static void seq_set_overflow(struct seq_file *m)
75731
75732 static void *seq_buf_alloc(unsigned long size)
75733 {
75734- void *buf;
75735-
75736- /*
75737- * __GFP_NORETRY to avoid oom-killings with high-order allocations -
75738- * it's better to fall back to vmalloc() than to kill things.
75739- */
75740- buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
75741- if (!buf && size > PAGE_SIZE)
75742- buf = vmalloc(size);
75743- return buf;
75744+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
75745 }
75746
75747 /**
75748@@ -65,6 +58,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
75749 #ifdef CONFIG_USER_NS
75750 p->user_ns = file->f_cred->user_ns;
75751 #endif
75752+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
75753+ p->exec_id = current->exec_id;
75754+#endif
75755
75756 /*
75757 * Wrappers around seq_open(e.g. swaps_open) need to be
75758@@ -87,6 +83,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
75759 }
75760 EXPORT_SYMBOL(seq_open);
75761
75762+
75763+int seq_open_restrict(struct file *file, const struct seq_operations *op)
75764+{
75765+ if (gr_proc_is_restricted())
75766+ return -EACCES;
75767+
75768+ return seq_open(file, op);
75769+}
75770+EXPORT_SYMBOL(seq_open_restrict);
75771+
75772 static int traverse(struct seq_file *m, loff_t offset)
75773 {
75774 loff_t pos = 0, index;
75775@@ -158,7 +164,7 @@ Eoverflow:
75776 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
75777 {
75778 struct seq_file *m = file->private_data;
75779- size_t copied = 0;
75780+ ssize_t copied = 0;
75781 loff_t pos;
75782 size_t n;
75783 void *p;
75784@@ -557,7 +563,7 @@ static void single_stop(struct seq_file *p, void *v)
75785 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
75786 void *data)
75787 {
75788- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
75789+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
75790 int res = -ENOMEM;
75791
75792 if (op) {
75793@@ -593,6 +599,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
75794 }
75795 EXPORT_SYMBOL(single_open_size);
75796
75797+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
75798+ void *data)
75799+{
75800+ if (gr_proc_is_restricted())
75801+ return -EACCES;
75802+
75803+ return single_open(file, show, data);
75804+}
75805+EXPORT_SYMBOL(single_open_restrict);
75806+
75807+
75808 int single_release(struct inode *inode, struct file *file)
75809 {
75810 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
75811diff --git a/fs/splice.c b/fs/splice.c
75812index 7968da9..4ce985b 100644
75813--- a/fs/splice.c
75814+++ b/fs/splice.c
75815@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
75816 pipe_lock(pipe);
75817
75818 for (;;) {
75819- if (!pipe->readers) {
75820+ if (!atomic_read(&pipe->readers)) {
75821 send_sig(SIGPIPE, current, 0);
75822 if (!ret)
75823 ret = -EPIPE;
75824@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
75825 page_nr++;
75826 ret += buf->len;
75827
75828- if (pipe->files)
75829+ if (atomic_read(&pipe->files))
75830 do_wakeup = 1;
75831
75832 if (!--spd->nr_pages)
75833@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
75834 do_wakeup = 0;
75835 }
75836
75837- pipe->waiting_writers++;
75838+ atomic_inc(&pipe->waiting_writers);
75839 pipe_wait(pipe);
75840- pipe->waiting_writers--;
75841+ atomic_dec(&pipe->waiting_writers);
75842 }
75843
75844 pipe_unlock(pipe);
75845@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
75846 old_fs = get_fs();
75847 set_fs(get_ds());
75848 /* The cast to a user pointer is valid due to the set_fs() */
75849- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
75850+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
75851 set_fs(old_fs);
75852
75853 return res;
75854@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
75855 old_fs = get_fs();
75856 set_fs(get_ds());
75857 /* The cast to a user pointer is valid due to the set_fs() */
75858- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
75859+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
75860 set_fs(old_fs);
75861
75862 return res;
75863@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
75864 goto err;
75865
75866 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
75867- vec[i].iov_base = (void __user *) page_address(page);
75868+ vec[i].iov_base = (void __force_user *) page_address(page);
75869 vec[i].iov_len = this_len;
75870 spd.pages[i] = page;
75871 spd.nr_pages++;
75872@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
75873 ops->release(pipe, buf);
75874 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
75875 pipe->nrbufs--;
75876- if (pipe->files)
75877+ if (atomic_read(&pipe->files))
75878 sd->need_wakeup = true;
75879 }
75880
75881@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
75882 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
75883 {
75884 while (!pipe->nrbufs) {
75885- if (!pipe->writers)
75886+ if (!atomic_read(&pipe->writers))
75887 return 0;
75888
75889- if (!pipe->waiting_writers && sd->num_spliced)
75890+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
75891 return 0;
75892
75893 if (sd->flags & SPLICE_F_NONBLOCK)
75894@@ -1025,7 +1025,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
75895 ops->release(pipe, buf);
75896 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
75897 pipe->nrbufs--;
75898- if (pipe->files)
75899+ if (atomic_read(&pipe->files))
75900 sd.need_wakeup = true;
75901 } else {
75902 buf->offset += ret;
75903@@ -1159,7 +1159,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
75904 long ret, bytes;
75905 umode_t i_mode;
75906 size_t len;
75907- int i, flags;
75908+ int i, flags, more;
75909
75910 /*
75911 * We require the input being a regular file, as we don't want to
75912@@ -1185,7 +1185,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
75913 * out of the pipe right after the splice_to_pipe(). So set
75914 * PIPE_READERS appropriately.
75915 */
75916- pipe->readers = 1;
75917+ atomic_set(&pipe->readers, 1);
75918
75919 current->splice_pipe = pipe;
75920 }
75921@@ -1202,6 +1202,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
75922 * Don't block on output, we have to drain the direct pipe.
75923 */
75924 sd->flags &= ~SPLICE_F_NONBLOCK;
75925+ more = sd->flags & SPLICE_F_MORE;
75926
75927 while (len) {
75928 size_t read_len;
75929@@ -1215,6 +1216,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
75930 sd->total_len = read_len;
75931
75932 /*
75933+ * If more data is pending, set SPLICE_F_MORE
75934+ * If this is the last data and SPLICE_F_MORE was not set
75935+ * initially, clears it.
75936+ */
75937+ if (read_len < len)
75938+ sd->flags |= SPLICE_F_MORE;
75939+ else if (!more)
75940+ sd->flags &= ~SPLICE_F_MORE;
75941+ /*
75942 * NOTE: nonblocking mode only applies to the input. We
75943 * must not do the output in nonblocking mode as then we
75944 * could get stuck data in the internal pipe:
75945@@ -1482,6 +1492,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
75946
75947 partial[buffers].offset = off;
75948 partial[buffers].len = plen;
75949+ partial[buffers].private = 0;
75950
75951 off = 0;
75952 len -= plen;
75953@@ -1718,9 +1729,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
75954 ret = -ERESTARTSYS;
75955 break;
75956 }
75957- if (!pipe->writers)
75958+ if (!atomic_read(&pipe->writers))
75959 break;
75960- if (!pipe->waiting_writers) {
75961+ if (!atomic_read(&pipe->waiting_writers)) {
75962 if (flags & SPLICE_F_NONBLOCK) {
75963 ret = -EAGAIN;
75964 break;
75965@@ -1752,7 +1763,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
75966 pipe_lock(pipe);
75967
75968 while (pipe->nrbufs >= pipe->buffers) {
75969- if (!pipe->readers) {
75970+ if (!atomic_read(&pipe->readers)) {
75971 send_sig(SIGPIPE, current, 0);
75972 ret = -EPIPE;
75973 break;
75974@@ -1765,9 +1776,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
75975 ret = -ERESTARTSYS;
75976 break;
75977 }
75978- pipe->waiting_writers++;
75979+ atomic_inc(&pipe->waiting_writers);
75980 pipe_wait(pipe);
75981- pipe->waiting_writers--;
75982+ atomic_dec(&pipe->waiting_writers);
75983 }
75984
75985 pipe_unlock(pipe);
75986@@ -1803,14 +1814,14 @@ retry:
75987 pipe_double_lock(ipipe, opipe);
75988
75989 do {
75990- if (!opipe->readers) {
75991+ if (!atomic_read(&opipe->readers)) {
75992 send_sig(SIGPIPE, current, 0);
75993 if (!ret)
75994 ret = -EPIPE;
75995 break;
75996 }
75997
75998- if (!ipipe->nrbufs && !ipipe->writers)
75999+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
76000 break;
76001
76002 /*
76003@@ -1907,7 +1918,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
76004 pipe_double_lock(ipipe, opipe);
76005
76006 do {
76007- if (!opipe->readers) {
76008+ if (!atomic_read(&opipe->readers)) {
76009 send_sig(SIGPIPE, current, 0);
76010 if (!ret)
76011 ret = -EPIPE;
76012@@ -1952,7 +1963,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
76013 * return EAGAIN if we have the potential of some data in the
76014 * future, otherwise just return 0
76015 */
76016- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
76017+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
76018 ret = -EAGAIN;
76019
76020 pipe_unlock(ipipe);
76021diff --git a/fs/squashfs/xattr.c b/fs/squashfs/xattr.c
76022index 92fcde7..1687329 100644
76023--- a/fs/squashfs/xattr.c
76024+++ b/fs/squashfs/xattr.c
76025@@ -46,8 +46,8 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
76026 + msblk->xattr_table;
76027 int offset = SQUASHFS_XATTR_OFFSET(squashfs_i(inode)->xattr);
76028 int count = squashfs_i(inode)->xattr_count;
76029- size_t rest = buffer_size;
76030- int err;
76031+ size_t used = 0;
76032+ ssize_t err;
76033
76034 /* check that the file system has xattrs */
76035 if (msblk->xattr_id_table == NULL)
76036@@ -68,11 +68,11 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
76037 name_size = le16_to_cpu(entry.size);
76038 handler = squashfs_xattr_handler(le16_to_cpu(entry.type));
76039 if (handler)
76040- prefix_size = handler->list(d, buffer, rest, NULL,
76041+ prefix_size = handler->list(d, buffer, buffer ? buffer_size - used : 0, NULL,
76042 name_size, handler->flags);
76043 if (prefix_size) {
76044 if (buffer) {
76045- if (prefix_size + name_size + 1 > rest) {
76046+ if (prefix_size + name_size + 1 > buffer_size - used) {
76047 err = -ERANGE;
76048 goto failed;
76049 }
76050@@ -86,7 +86,7 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
76051 buffer[name_size] = '\0';
76052 buffer += name_size + 1;
76053 }
76054- rest -= prefix_size + name_size + 1;
76055+ used += prefix_size + name_size + 1;
76056 } else {
76057 /* no handler or insuffficient privileges, so skip */
76058 err = squashfs_read_metadata(sb, NULL, &start,
76059@@ -107,7 +107,7 @@ ssize_t squashfs_listxattr(struct dentry *d, char *buffer,
76060 if (err < 0)
76061 goto failed;
76062 }
76063- err = buffer_size - rest;
76064+ err = used;
76065
76066 failed:
76067 return err;
76068diff --git a/fs/stat.c b/fs/stat.c
76069index ae0c3ce..9ee641c 100644
76070--- a/fs/stat.c
76071+++ b/fs/stat.c
76072@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
76073 stat->gid = inode->i_gid;
76074 stat->rdev = inode->i_rdev;
76075 stat->size = i_size_read(inode);
76076- stat->atime = inode->i_atime;
76077- stat->mtime = inode->i_mtime;
76078+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
76079+ stat->atime = inode->i_ctime;
76080+ stat->mtime = inode->i_ctime;
76081+ } else {
76082+ stat->atime = inode->i_atime;
76083+ stat->mtime = inode->i_mtime;
76084+ }
76085 stat->ctime = inode->i_ctime;
76086 stat->blksize = (1 << inode->i_blkbits);
76087 stat->blocks = inode->i_blocks;
76088@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
76089 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
76090 {
76091 struct inode *inode = path->dentry->d_inode;
76092+ int retval;
76093
76094- if (inode->i_op->getattr)
76095- return inode->i_op->getattr(path->mnt, path->dentry, stat);
76096+ if (inode->i_op->getattr) {
76097+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
76098+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
76099+ stat->atime = stat->ctime;
76100+ stat->mtime = stat->ctime;
76101+ }
76102+ return retval;
76103+ }
76104
76105 generic_fillattr(inode, stat);
76106 return 0;
76107diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
76108index 0b45ff4..edf9d3a 100644
76109--- a/fs/sysfs/dir.c
76110+++ b/fs/sysfs/dir.c
76111@@ -33,6 +33,10 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
76112 kfree(buf);
76113 }
76114
76115+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
76116+extern int grsec_enable_sysfs_restrict;
76117+#endif
76118+
76119 /**
76120 * sysfs_create_dir_ns - create a directory for an object with a namespace tag
76121 * @kobj: object we're creating directory for
76122@@ -41,9 +45,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
76123 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
76124 {
76125 struct kernfs_node *parent, *kn;
76126+ const char *name;
76127+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
76128+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
76129+ const char *parent_name;
76130+#endif
76131
76132 BUG_ON(!kobj);
76133
76134+ name = kobject_name(kobj);
76135+
76136 if (kobj->parent)
76137 parent = kobj->parent->sd;
76138 else
76139@@ -52,11 +63,24 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
76140 if (!parent)
76141 return -ENOENT;
76142
76143- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
76144- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
76145+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
76146+ parent_name = parent->name;
76147+ mode = S_IRWXU;
76148+
76149+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
76150+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
76151+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
76152+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
76153+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
76154+ if (!grsec_enable_sysfs_restrict)
76155+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
76156+#endif
76157+
76158+ kn = kernfs_create_dir_ns(parent, name,
76159+ mode, kobj, ns);
76160 if (IS_ERR(kn)) {
76161 if (PTR_ERR(kn) == -EEXIST)
76162- sysfs_warn_dup(parent, kobject_name(kobj));
76163+ sysfs_warn_dup(parent, name);
76164 return PTR_ERR(kn);
76165 }
76166
76167diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
76168index 69d4889..a810bd4 100644
76169--- a/fs/sysv/sysv.h
76170+++ b/fs/sysv/sysv.h
76171@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
76172 #endif
76173 }
76174
76175-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
76176+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
76177 {
76178 if (sbi->s_bytesex == BYTESEX_PDP)
76179 return PDP_swab((__force __u32)n);
76180diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
76181index fb08b0c..65fcc7e 100644
76182--- a/fs/ubifs/io.c
76183+++ b/fs/ubifs/io.c
76184@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
76185 return err;
76186 }
76187
76188-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
76189+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
76190 {
76191 int err;
76192
76193diff --git a/fs/udf/misc.c b/fs/udf/misc.c
76194index c175b4d..8f36a16 100644
76195--- a/fs/udf/misc.c
76196+++ b/fs/udf/misc.c
76197@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
76198
76199 u8 udf_tag_checksum(const struct tag *t)
76200 {
76201- u8 *data = (u8 *)t;
76202+ const u8 *data = (const u8 *)t;
76203 u8 checksum = 0;
76204 int i;
76205 for (i = 0; i < sizeof(struct tag); ++i)
76206diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
76207index 8d974c4..b82f6ec 100644
76208--- a/fs/ufs/swab.h
76209+++ b/fs/ufs/swab.h
76210@@ -22,7 +22,7 @@ enum {
76211 BYTESEX_BE
76212 };
76213
76214-static inline u64
76215+static inline u64 __intentional_overflow(-1)
76216 fs64_to_cpu(struct super_block *sbp, __fs64 n)
76217 {
76218 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
76219@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
76220 return (__force __fs64)cpu_to_be64(n);
76221 }
76222
76223-static inline u32
76224+static inline u32 __intentional_overflow(-1)
76225 fs32_to_cpu(struct super_block *sbp, __fs32 n)
76226 {
76227 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
76228diff --git a/fs/utimes.c b/fs/utimes.c
76229index aa138d6..5f3a811 100644
76230--- a/fs/utimes.c
76231+++ b/fs/utimes.c
76232@@ -1,6 +1,7 @@
76233 #include <linux/compiler.h>
76234 #include <linux/file.h>
76235 #include <linux/fs.h>
76236+#include <linux/security.h>
76237 #include <linux/linkage.h>
76238 #include <linux/mount.h>
76239 #include <linux/namei.h>
76240@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
76241 }
76242 }
76243 retry_deleg:
76244+
76245+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
76246+ error = -EACCES;
76247+ goto mnt_drop_write_and_out;
76248+ }
76249+
76250 mutex_lock(&inode->i_mutex);
76251 error = notify_change(path->dentry, &newattrs, &delegated_inode);
76252 mutex_unlock(&inode->i_mutex);
76253diff --git a/fs/xattr.c b/fs/xattr.c
76254index 4ef6985..a6cd6567 100644
76255--- a/fs/xattr.c
76256+++ b/fs/xattr.c
76257@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
76258 return rc;
76259 }
76260
76261+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
76262+ssize_t
76263+pax_getxattr(struct dentry *dentry, void *value, size_t size)
76264+{
76265+ struct inode *inode = dentry->d_inode;
76266+ ssize_t error;
76267+
76268+ error = inode_permission(inode, MAY_EXEC);
76269+ if (error)
76270+ return error;
76271+
76272+ if (inode->i_op->getxattr)
76273+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
76274+ else
76275+ error = -EOPNOTSUPP;
76276+
76277+ return error;
76278+}
76279+EXPORT_SYMBOL(pax_getxattr);
76280+#endif
76281+
76282 ssize_t
76283 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
76284 {
76285@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
76286 * Extended attribute SET operations
76287 */
76288 static long
76289-setxattr(struct dentry *d, const char __user *name, const void __user *value,
76290+setxattr(struct path *path, const char __user *name, const void __user *value,
76291 size_t size, int flags)
76292 {
76293 int error;
76294@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
76295 posix_acl_fix_xattr_from_user(kvalue, size);
76296 }
76297
76298- error = vfs_setxattr(d, kname, kvalue, size, flags);
76299+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
76300+ error = -EACCES;
76301+ goto out;
76302+ }
76303+
76304+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
76305 out:
76306 if (vvalue)
76307 vfree(vvalue);
76308@@ -376,7 +402,7 @@ retry:
76309 return error;
76310 error = mnt_want_write(path.mnt);
76311 if (!error) {
76312- error = setxattr(path.dentry, name, value, size, flags);
76313+ error = setxattr(&path, name, value, size, flags);
76314 mnt_drop_write(path.mnt);
76315 }
76316 path_put(&path);
76317@@ -412,7 +438,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
76318 audit_file(f.file);
76319 error = mnt_want_write_file(f.file);
76320 if (!error) {
76321- error = setxattr(f.file->f_path.dentry, name, value, size, flags);
76322+ error = setxattr(&f.file->f_path, name, value, size, flags);
76323 mnt_drop_write_file(f.file);
76324 }
76325 fdput(f);
76326@@ -598,7 +624,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
76327 * Extended attribute REMOVE operations
76328 */
76329 static long
76330-removexattr(struct dentry *d, const char __user *name)
76331+removexattr(struct path *path, const char __user *name)
76332 {
76333 int error;
76334 char kname[XATTR_NAME_MAX + 1];
76335@@ -609,7 +635,10 @@ removexattr(struct dentry *d, const char __user *name)
76336 if (error < 0)
76337 return error;
76338
76339- return vfs_removexattr(d, kname);
76340+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
76341+ return -EACCES;
76342+
76343+ return vfs_removexattr(path->dentry, kname);
76344 }
76345
76346 static int path_removexattr(const char __user *pathname,
76347@@ -623,7 +652,7 @@ retry:
76348 return error;
76349 error = mnt_want_write(path.mnt);
76350 if (!error) {
76351- error = removexattr(path.dentry, name);
76352+ error = removexattr(&path, name);
76353 mnt_drop_write(path.mnt);
76354 }
76355 path_put(&path);
76356@@ -649,14 +678,16 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
76357 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
76358 {
76359 struct fd f = fdget(fd);
76360+ struct path *path;
76361 int error = -EBADF;
76362
76363 if (!f.file)
76364 return error;
76365+ path = &f.file->f_path;
76366 audit_file(f.file);
76367 error = mnt_want_write_file(f.file);
76368 if (!error) {
76369- error = removexattr(f.file->f_path.dentry, name);
76370+ error = removexattr(path, name);
76371 mnt_drop_write_file(f.file);
76372 }
76373 fdput(f);
76374diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
76375index 61ec015..7c18807 100644
76376--- a/fs/xfs/libxfs/xfs_bmap.c
76377+++ b/fs/xfs/libxfs/xfs_bmap.c
76378@@ -580,7 +580,7 @@ xfs_bmap_validate_ret(
76379
76380 #else
76381 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
76382-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
76383+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
76384 #endif /* DEBUG */
76385
76386 /*
76387diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
76388index 098cd78..724d3f8 100644
76389--- a/fs/xfs/xfs_dir2_readdir.c
76390+++ b/fs/xfs/xfs_dir2_readdir.c
76391@@ -140,7 +140,12 @@ xfs_dir2_sf_getdents(
76392 ino = dp->d_ops->sf_get_ino(sfp, sfep);
76393 filetype = dp->d_ops->sf_get_ftype(sfep);
76394 ctx->pos = off & 0x7fffffff;
76395- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
76396+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
76397+ char name[sfep->namelen];
76398+ memcpy(name, sfep->name, sfep->namelen);
76399+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
76400+ return 0;
76401+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
76402 xfs_dir3_get_dtype(dp->i_mount, filetype)))
76403 return 0;
76404 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
76405diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
76406index ac4feae..386d551 100644
76407--- a/fs/xfs/xfs_ioctl.c
76408+++ b/fs/xfs/xfs_ioctl.c
76409@@ -120,7 +120,7 @@ xfs_find_handle(
76410 }
76411
76412 error = -EFAULT;
76413- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
76414+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
76415 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
76416 goto out_put;
76417
76418diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
76419index c31d2c2..6ec8f62 100644
76420--- a/fs/xfs/xfs_linux.h
76421+++ b/fs/xfs/xfs_linux.h
76422@@ -234,7 +234,7 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
76423 * of the compiler which do not like us using do_div in the middle
76424 * of large functions.
76425 */
76426-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
76427+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
76428 {
76429 __u32 mod;
76430
76431@@ -290,7 +290,7 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
76432 return 0;
76433 }
76434 #else
76435-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
76436+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
76437 {
76438 __u32 mod;
76439
76440diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
76441new file mode 100644
76442index 0000000..31f8fe4
76443--- /dev/null
76444+++ b/grsecurity/Kconfig
76445@@ -0,0 +1,1182 @@
76446+#
76447+# grecurity configuration
76448+#
76449+menu "Memory Protections"
76450+depends on GRKERNSEC
76451+
76452+config GRKERNSEC_KMEM
76453+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
76454+ default y if GRKERNSEC_CONFIG_AUTO
76455+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
76456+ help
76457+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
76458+ be written to or read from to modify or leak the contents of the running
76459+ kernel. /dev/port will also not be allowed to be opened, writing to
76460+ /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
76461+ If you have module support disabled, enabling this will close up several
76462+ ways that are currently used to insert malicious code into the running
76463+ kernel.
76464+
76465+ Even with this feature enabled, we still highly recommend that
76466+ you use the RBAC system, as it is still possible for an attacker to
76467+ modify the running kernel through other more obscure methods.
76468+
76469+ It is highly recommended that you say Y here if you meet all the
76470+ conditions above.
76471+
76472+config GRKERNSEC_VM86
76473+ bool "Restrict VM86 mode"
76474+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
76475+ depends on X86_32
76476+
76477+ help
76478+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
76479+ make use of a special execution mode on 32bit x86 processors called
76480+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
76481+ video cards and will still work with this option enabled. The purpose
76482+ of the option is to prevent exploitation of emulation errors in
76483+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
76484+ Nearly all users should be able to enable this option.
76485+
76486+config GRKERNSEC_IO
76487+ bool "Disable privileged I/O"
76488+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
76489+ depends on X86
76490+ select RTC_CLASS
76491+ select RTC_INTF_DEV
76492+ select RTC_DRV_CMOS
76493+
76494+ help
76495+ If you say Y here, all ioperm and iopl calls will return an error.
76496+ Ioperm and iopl can be used to modify the running kernel.
76497+ Unfortunately, some programs need this access to operate properly,
76498+ the most notable of which are XFree86 and hwclock. hwclock can be
76499+ remedied by having RTC support in the kernel, so real-time
76500+ clock support is enabled if this option is enabled, to ensure
76501+ that hwclock operates correctly. If hwclock still does not work,
76502+ either update udev or symlink /dev/rtc to /dev/rtc0.
76503+
76504+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
76505+ you may not be able to boot into a graphical environment with this
76506+ option enabled. In this case, you should use the RBAC system instead.
76507+
76508+config GRKERNSEC_BPF_HARDEN
76509+ bool "Harden BPF interpreter"
76510+ default y if GRKERNSEC_CONFIG_AUTO
76511+ help
76512+ Unlike previous versions of grsecurity that hardened both the BPF
76513+ interpreted code against corruption at rest as well as the JIT code
76514+ against JIT-spray attacks and attacker-controlled immediate values
76515+ for ROP, this feature will enforce disabling of the new eBPF JIT engine
76516+ and will ensure the interpreted code is read-only at rest. This feature
76517+ may be removed at a later time when eBPF stabilizes to entirely revert
76518+ back to the more secure pre-3.16 BPF interpreter/JIT.
76519+
76520+ If you're using KERNEXEC, it's recommended that you enable this option
76521+ to supplement the hardening of the kernel.
76522+
76523+config GRKERNSEC_PERF_HARDEN
76524+ bool "Disable unprivileged PERF_EVENTS usage by default"
76525+ default y if GRKERNSEC_CONFIG_AUTO
76526+ depends on PERF_EVENTS
76527+ help
76528+ If you say Y here, the range of acceptable values for the
76529+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
76530+ default to a new value: 3. When the sysctl is set to this value, no
76531+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
76532+
76533+ Though PERF_EVENTS can be used legitimately for performance monitoring
76534+ and low-level application profiling, it is forced on regardless of
76535+ configuration, has been at fault for several vulnerabilities, and
76536+ creates new opportunities for side channels and other information leaks.
76537+
76538+ This feature puts PERF_EVENTS into a secure default state and permits
76539+ the administrator to change out of it temporarily if unprivileged
76540+ application profiling is needed.
76541+
76542+config GRKERNSEC_RAND_THREADSTACK
76543+ bool "Insert random gaps between thread stacks"
76544+ default y if GRKERNSEC_CONFIG_AUTO
76545+ depends on PAX_RANDMMAP && !PPC
76546+ help
76547+ If you say Y here, a random-sized gap will be enforced between allocated
76548+ thread stacks. Glibc's NPTL and other threading libraries that
76549+ pass MAP_STACK to the kernel for thread stack allocation are supported.
76550+ The implementation currently provides 8 bits of entropy for the gap.
76551+
76552+ Many distributions do not compile threaded remote services with the
76553+ -fstack-check argument to GCC, causing the variable-sized stack-based
76554+ allocator, alloca(), to not probe the stack on allocation. This
76555+ permits an unbounded alloca() to skip over any guard page and potentially
76556+ modify another thread's stack reliably. An enforced random gap
76557+ reduces the reliability of such an attack and increases the chance
76558+ that such a read/write to another thread's stack instead lands in
76559+ an unmapped area, causing a crash and triggering grsecurity's
76560+ anti-bruteforcing logic.
76561+
76562+config GRKERNSEC_PROC_MEMMAP
76563+ bool "Harden ASLR against information leaks and entropy reduction"
76564+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
76565+ depends on PAX_NOEXEC || PAX_ASLR
76566+ help
76567+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
76568+ give no information about the addresses of its mappings if
76569+ PaX features that rely on random addresses are enabled on the task.
76570+ In addition to sanitizing this information and disabling other
76571+ dangerous sources of information, this option causes reads of sensitive
76572+ /proc/<pid> entries where the file descriptor was opened in a different
76573+ task than the one performing the read. Such attempts are logged.
76574+ This option also limits argv/env strings for suid/sgid binaries
76575+ to 512KB to prevent a complete exhaustion of the stack entropy provided
76576+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
76577+ binaries to prevent alternative mmap layouts from being abused.
76578+
76579+ If you use PaX it is essential that you say Y here as it closes up
76580+ several holes that make full ASLR useless locally.
76581+
76582+
76583+config GRKERNSEC_KSTACKOVERFLOW
76584+ bool "Prevent kernel stack overflows"
76585+ default y if GRKERNSEC_CONFIG_AUTO
76586+ depends on !IA64 && 64BIT
76587+ help
76588+ If you say Y here, the kernel's process stacks will be allocated
76589+ with vmalloc instead of the kernel's default allocator. This
76590+ introduces guard pages that in combination with the alloca checking
76591+ of the STACKLEAK feature prevents all forms of kernel process stack
76592+ overflow abuse. Note that this is different from kernel stack
76593+ buffer overflows.
76594+
76595+config GRKERNSEC_BRUTE
76596+ bool "Deter exploit bruteforcing"
76597+ default y if GRKERNSEC_CONFIG_AUTO
76598+ help
76599+ If you say Y here, attempts to bruteforce exploits against forking
76600+ daemons such as apache or sshd, as well as against suid/sgid binaries
76601+ will be deterred. When a child of a forking daemon is killed by PaX
76602+ or crashes due to an illegal instruction or other suspicious signal,
76603+ the parent process will be delayed 30 seconds upon every subsequent
76604+ fork until the administrator is able to assess the situation and
76605+ restart the daemon.
76606+ In the suid/sgid case, the attempt is logged, the user has all their
76607+ existing instances of the suid/sgid binary terminated and will
76608+ be unable to execute any suid/sgid binaries for 15 minutes.
76609+
76610+ It is recommended that you also enable signal logging in the auditing
76611+ section so that logs are generated when a process triggers a suspicious
76612+ signal.
76613+ If the sysctl option is enabled, a sysctl option with name
76614+ "deter_bruteforce" is created.
76615+
76616+config GRKERNSEC_MODHARDEN
76617+ bool "Harden module auto-loading"
76618+ default y if GRKERNSEC_CONFIG_AUTO
76619+ depends on MODULES
76620+ help
76621+ If you say Y here, module auto-loading in response to use of some
76622+ feature implemented by an unloaded module will be restricted to
76623+ root users. Enabling this option helps defend against attacks
76624+ by unprivileged users who abuse the auto-loading behavior to
76625+ cause a vulnerable module to load that is then exploited.
76626+
76627+ If this option prevents a legitimate use of auto-loading for a
76628+ non-root user, the administrator can execute modprobe manually
76629+ with the exact name of the module mentioned in the alert log.
76630+ Alternatively, the administrator can add the module to the list
76631+ of modules loaded at boot by modifying init scripts.
76632+
76633+ Modification of init scripts will most likely be needed on
76634+ Ubuntu servers with encrypted home directory support enabled,
76635+ as the first non-root user logging in will cause the ecb(aes),
76636+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
76637+
76638+config GRKERNSEC_HIDESYM
76639+ bool "Hide kernel symbols"
76640+ default y if GRKERNSEC_CONFIG_AUTO
76641+ select PAX_USERCOPY_SLABS
76642+ help
76643+ If you say Y here, getting information on loaded modules, and
76644+ displaying all kernel symbols through a syscall will be restricted
76645+ to users with CAP_SYS_MODULE. For software compatibility reasons,
76646+ /proc/kallsyms will be restricted to the root user. The RBAC
76647+ system can hide that entry even from root.
76648+
76649+ This option also prevents leaking of kernel addresses through
76650+ several /proc entries.
76651+
76652+ Note that this option is only effective provided the following
76653+ conditions are met:
76654+ 1) The kernel using grsecurity is not precompiled by some distribution
76655+ 2) You have also enabled GRKERNSEC_DMESG
76656+ 3) You are using the RBAC system and hiding other files such as your
76657+ kernel image and System.map. Alternatively, enabling this option
76658+ causes the permissions on /boot, /lib/modules, and the kernel
76659+ source directory to change at compile time to prevent
76660+ reading by non-root users.
76661+ If the above conditions are met, this option will aid in providing a
76662+ useful protection against local kernel exploitation of overflows
76663+ and arbitrary read/write vulnerabilities.
76664+
76665+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
76666+ in addition to this feature.
76667+
76668+config GRKERNSEC_RANDSTRUCT
76669+ bool "Randomize layout of sensitive kernel structures"
76670+ default y if GRKERNSEC_CONFIG_AUTO
76671+ select GRKERNSEC_HIDESYM
76672+ select MODVERSIONS if MODULES
76673+ help
76674+ If you say Y here, the layouts of a number of sensitive kernel
76675+ structures (task, fs, cred, etc) and all structures composed entirely
76676+ of function pointers (aka "ops" structs) will be randomized at compile-time.
76677+ This can introduce the requirement of an additional infoleak
76678+ vulnerability for exploits targeting these structure types.
76679+
76680+ Enabling this feature will introduce some performance impact, slightly
76681+ increase memory usage, and prevent the use of forensic tools like
76682+ Volatility against the system (unless the kernel source tree isn't
76683+ cleaned after kernel installation).
76684+
76685+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
76686+ It remains after a make clean to allow for external modules to be compiled
76687+ with the existing seed and will be removed by a make mrproper or
76688+ make distclean.
76689+
76690+ Note that the implementation requires gcc 4.6.4. or newer. You may need
76691+ to install the supporting headers explicitly in addition to the normal
76692+ gcc package.
76693+
76694+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
76695+ bool "Use cacheline-aware structure randomization"
76696+ depends on GRKERNSEC_RANDSTRUCT
76697+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
76698+ help
76699+ If you say Y here, the RANDSTRUCT randomization will make a best effort
76700+ at restricting randomization to cacheline-sized groups of elements. It
76701+ will further not randomize bitfields in structures. This reduces the
76702+ performance hit of RANDSTRUCT at the cost of weakened randomization.
76703+
76704+config GRKERNSEC_KERN_LOCKOUT
76705+ bool "Active kernel exploit response"
76706+ default y if GRKERNSEC_CONFIG_AUTO
76707+ depends on X86 || ARM || PPC || SPARC
76708+ help
76709+ If you say Y here, when a PaX alert is triggered due to suspicious
76710+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
76711+ or an OOPS occurs due to bad memory accesses, instead of just
76712+ terminating the offending process (and potentially allowing
76713+ a subsequent exploit from the same user), we will take one of two
76714+ actions:
76715+ If the user was root, we will panic the system
76716+ If the user was non-root, we will log the attempt, terminate
76717+ all processes owned by the user, then prevent them from creating
76718+ any new processes until the system is restarted
76719+ This deters repeated kernel exploitation/bruteforcing attempts
76720+ and is useful for later forensics.
76721+
76722+config GRKERNSEC_OLD_ARM_USERLAND
76723+ bool "Old ARM userland compatibility"
76724+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
76725+ help
76726+ If you say Y here, stubs of executable code to perform such operations
76727+ as "compare-exchange" will be placed at fixed locations in the ARM vector
76728+ table. This is unfortunately needed for old ARM userland meant to run
76729+ across a wide range of processors. Without this option enabled,
76730+ the get_tls and data memory barrier stubs will be emulated by the kernel,
76731+ which is enough for Linaro userlands or other userlands designed for v6
76732+ and newer ARM CPUs. It's recommended that you try without this option enabled
76733+ first, and only enable it if your userland does not boot (it will likely fail
76734+ at init time).
76735+
76736+endmenu
76737+menu "Role Based Access Control Options"
76738+depends on GRKERNSEC
76739+
76740+config GRKERNSEC_RBAC_DEBUG
76741+ bool
76742+
76743+config GRKERNSEC_NO_RBAC
76744+ bool "Disable RBAC system"
76745+ help
76746+ If you say Y here, the /dev/grsec device will be removed from the kernel,
76747+ preventing the RBAC system from being enabled. You should only say Y
76748+ here if you have no intention of using the RBAC system, so as to prevent
76749+ an attacker with root access from misusing the RBAC system to hide files
76750+ and processes when loadable module support and /dev/[k]mem have been
76751+ locked down.
76752+
76753+config GRKERNSEC_ACL_HIDEKERN
76754+ bool "Hide kernel processes"
76755+ help
76756+ If you say Y here, all kernel threads will be hidden to all
76757+ processes but those whose subject has the "view hidden processes"
76758+ flag.
76759+
76760+config GRKERNSEC_ACL_MAXTRIES
76761+ int "Maximum tries before password lockout"
76762+ default 3
76763+ help
76764+ This option enforces the maximum number of times a user can attempt
76765+ to authorize themselves with the grsecurity RBAC system before being
76766+ denied the ability to attempt authorization again for a specified time.
76767+ The lower the number, the harder it will be to brute-force a password.
76768+
76769+config GRKERNSEC_ACL_TIMEOUT
76770+ int "Time to wait after max password tries, in seconds"
76771+ default 30
76772+ help
76773+ This option specifies the time the user must wait after attempting to
76774+ authorize to the RBAC system with the maximum number of invalid
76775+ passwords. The higher the number, the harder it will be to brute-force
76776+ a password.
76777+
76778+endmenu
76779+menu "Filesystem Protections"
76780+depends on GRKERNSEC
76781+
76782+config GRKERNSEC_PROC
76783+ bool "Proc restrictions"
76784+ default y if GRKERNSEC_CONFIG_AUTO
76785+ help
76786+ If you say Y here, the permissions of the /proc filesystem
76787+ will be altered to enhance system security and privacy. You MUST
76788+ choose either a user only restriction or a user and group restriction.
76789+ Depending upon the option you choose, you can either restrict users to
76790+ see only the processes they themselves run, or choose a group that can
76791+ view all processes and files normally restricted to root if you choose
76792+ the "restrict to user only" option. NOTE: If you're running identd or
76793+ ntpd as a non-root user, you will have to run it as the group you
76794+ specify here.
76795+
76796+config GRKERNSEC_PROC_USER
76797+ bool "Restrict /proc to user only"
76798+ depends on GRKERNSEC_PROC
76799+ help
76800+ If you say Y here, non-root users will only be able to view their own
76801+ processes, and restricts them from viewing network-related information,
76802+ and viewing kernel symbol and module information.
76803+
76804+config GRKERNSEC_PROC_USERGROUP
76805+ bool "Allow special group"
76806+ default y if GRKERNSEC_CONFIG_AUTO
76807+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
76808+ help
76809+ If you say Y here, you will be able to select a group that will be
76810+ able to view all processes and network-related information. If you've
76811+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
76812+ remain hidden. This option is useful if you want to run identd as
76813+ a non-root user. The group you select may also be chosen at boot time
76814+ via "grsec_proc_gid=" on the kernel commandline.
76815+
76816+config GRKERNSEC_PROC_GID
76817+ int "GID for special group"
76818+ depends on GRKERNSEC_PROC_USERGROUP
76819+ default 1001
76820+
76821+config GRKERNSEC_PROC_ADD
76822+ bool "Additional restrictions"
76823+ default y if GRKERNSEC_CONFIG_AUTO
76824+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
76825+ help
76826+ If you say Y here, additional restrictions will be placed on
76827+ /proc that keep normal users from viewing device information and
76828+ slabinfo information that could be useful for exploits.
76829+
76830+config GRKERNSEC_LINK
76831+ bool "Linking restrictions"
76832+ default y if GRKERNSEC_CONFIG_AUTO
76833+ help
76834+ If you say Y here, /tmp race exploits will be prevented, since users
76835+ will no longer be able to follow symlinks owned by other users in
76836+ world-writable +t directories (e.g. /tmp), unless the owner of the
76837+ symlink is the owner of the directory. users will also not be
76838+ able to hardlink to files they do not own. If the sysctl option is
76839+ enabled, a sysctl option with name "linking_restrictions" is created.
76840+
76841+config GRKERNSEC_SYMLINKOWN
76842+ bool "Kernel-enforced SymlinksIfOwnerMatch"
76843+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
76844+ help
76845+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
76846+ that prevents it from being used as a security feature. As Apache
76847+ verifies the symlink by performing a stat() against the target of
76848+ the symlink before it is followed, an attacker can setup a symlink
76849+ to point to a same-owned file, then replace the symlink with one
76850+ that targets another user's file just after Apache "validates" the
76851+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
76852+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
76853+ will be in place for the group you specify. If the sysctl option
76854+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
76855+ created.
76856+
76857+config GRKERNSEC_SYMLINKOWN_GID
76858+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
76859+ depends on GRKERNSEC_SYMLINKOWN
76860+ default 1006
76861+ help
76862+ Setting this GID determines what group kernel-enforced
76863+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
76864+ is enabled, a sysctl option with name "symlinkown_gid" is created.
76865+
76866+config GRKERNSEC_FIFO
76867+ bool "FIFO restrictions"
76868+ default y if GRKERNSEC_CONFIG_AUTO
76869+ help
76870+ If you say Y here, users will not be able to write to FIFOs they don't
76871+ own in world-writable +t directories (e.g. /tmp), unless the owner of
76872+ the FIFO is the same owner of the directory it's held in. If the sysctl
76873+ option is enabled, a sysctl option with name "fifo_restrictions" is
76874+ created.
76875+
76876+config GRKERNSEC_SYSFS_RESTRICT
76877+ bool "Sysfs/debugfs restriction"
76878+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
76879+ depends on SYSFS
76880+ help
76881+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
76882+ any filesystem normally mounted under it (e.g. debugfs) will be
76883+ mostly accessible only by root. These filesystems generally provide access
76884+ to hardware and debug information that isn't appropriate for unprivileged
76885+ users of the system. Sysfs and debugfs have also become a large source
76886+ of new vulnerabilities, ranging from infoleaks to local compromise.
76887+ There has been very little oversight with an eye toward security involved
76888+ in adding new exporters of information to these filesystems, so their
76889+ use is discouraged.
76890+ For reasons of compatibility, a few directories have been whitelisted
76891+ for access by non-root users:
76892+ /sys/fs/selinux
76893+ /sys/fs/fuse
76894+ /sys/devices/system/cpu
76895+
76896+config GRKERNSEC_ROFS
76897+ bool "Runtime read-only mount protection"
76898+ depends on SYSCTL
76899+ help
76900+ If you say Y here, a sysctl option with name "romount_protect" will
76901+ be created. By setting this option to 1 at runtime, filesystems
76902+ will be protected in the following ways:
76903+ * No new writable mounts will be allowed
76904+ * Existing read-only mounts won't be able to be remounted read/write
76905+ * Write operations will be denied on all block devices
76906+ This option acts independently of grsec_lock: once it is set to 1,
76907+ it cannot be turned off. Therefore, please be mindful of the resulting
76908+ behavior if this option is enabled in an init script on a read-only
76909+ filesystem.
76910+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
76911+ and GRKERNSEC_IO should be enabled and module loading disabled via
76912+ config or at runtime.
76913+ This feature is mainly intended for secure embedded systems.
76914+
76915+
76916+config GRKERNSEC_DEVICE_SIDECHANNEL
76917+ bool "Eliminate stat/notify-based device sidechannels"
76918+ default y if GRKERNSEC_CONFIG_AUTO
76919+ help
76920+ If you say Y here, timing analyses on block or character
76921+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
76922+ will be thwarted for unprivileged users. If a process without
76923+ CAP_MKNOD stats such a device, the last access and last modify times
76924+ will match the device's create time. No access or modify events
76925+ will be triggered through inotify/dnotify/fanotify for such devices.
76926+ This feature will prevent attacks that may at a minimum
76927+ allow an attacker to determine the administrator's password length.
76928+
76929+config GRKERNSEC_CHROOT
76930+ bool "Chroot jail restrictions"
76931+ default y if GRKERNSEC_CONFIG_AUTO
76932+ help
76933+ If you say Y here, you will be able to choose several options that will
76934+ make breaking out of a chrooted jail much more difficult. If you
76935+ encounter no software incompatibilities with the following options, it
76936+ is recommended that you enable each one.
76937+
76938+ Note that the chroot restrictions are not intended to apply to "chroots"
76939+ to directories that are simple bind mounts of the global root filesystem.
76940+ For several other reasons, a user shouldn't expect any significant
76941+ security by performing such a chroot.
76942+
76943+config GRKERNSEC_CHROOT_MOUNT
76944+ bool "Deny mounts"
76945+ default y if GRKERNSEC_CONFIG_AUTO
76946+ depends on GRKERNSEC_CHROOT
76947+ help
76948+ If you say Y here, processes inside a chroot will not be able to
76949+ mount or remount filesystems. If the sysctl option is enabled, a
76950+ sysctl option with name "chroot_deny_mount" is created.
76951+
76952+config GRKERNSEC_CHROOT_DOUBLE
76953+ bool "Deny double-chroots"
76954+ default y if GRKERNSEC_CONFIG_AUTO
76955+ depends on GRKERNSEC_CHROOT
76956+ help
76957+ If you say Y here, processes inside a chroot will not be able to chroot
76958+ again outside the chroot. This is a widely used method of breaking
76959+ out of a chroot jail and should not be allowed. If the sysctl
76960+ option is enabled, a sysctl option with name
76961+ "chroot_deny_chroot" is created.
76962+
76963+config GRKERNSEC_CHROOT_PIVOT
76964+ bool "Deny pivot_root in chroot"
76965+ default y if GRKERNSEC_CONFIG_AUTO
76966+ depends on GRKERNSEC_CHROOT
76967+ help
76968+ If you say Y here, processes inside a chroot will not be able to use
76969+ a function called pivot_root() that was introduced in Linux 2.3.41. It
76970+ works similar to chroot in that it changes the root filesystem. This
76971+ function could be misused in a chrooted process to attempt to break out
76972+ of the chroot, and therefore should not be allowed. If the sysctl
76973+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
76974+ created.
76975+
76976+config GRKERNSEC_CHROOT_CHDIR
76977+ bool "Enforce chdir(\"/\") on all chroots"
76978+ default y if GRKERNSEC_CONFIG_AUTO
76979+ depends on GRKERNSEC_CHROOT
76980+ help
76981+ If you say Y here, the current working directory of all newly-chrooted
76982+ applications will be set to the the root directory of the chroot.
76983+ The man page on chroot(2) states:
76984+ Note that this call does not change the current working
76985+ directory, so that `.' can be outside the tree rooted at
76986+ `/'. In particular, the super-user can escape from a
76987+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
76988+
76989+ It is recommended that you say Y here, since it's not known to break
76990+ any software. If the sysctl option is enabled, a sysctl option with
76991+ name "chroot_enforce_chdir" is created.
76992+
76993+config GRKERNSEC_CHROOT_CHMOD
76994+ bool "Deny (f)chmod +s"
76995+ default y if GRKERNSEC_CONFIG_AUTO
76996+ depends on GRKERNSEC_CHROOT
76997+ help
76998+ If you say Y here, processes inside a chroot will not be able to chmod
76999+ or fchmod files to make them have suid or sgid bits. This protects
77000+ against another published method of breaking a chroot. If the sysctl
77001+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
77002+ created.
77003+
77004+config GRKERNSEC_CHROOT_FCHDIR
77005+ bool "Deny fchdir and fhandle out of chroot"
77006+ default y if GRKERNSEC_CONFIG_AUTO
77007+ depends on GRKERNSEC_CHROOT
77008+ help
77009+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
77010+ to a file descriptor of the chrooting process that points to a directory
77011+ outside the filesystem will be stopped. Additionally, this option prevents
77012+ use of the recently-created syscall for opening files by a guessable "file
77013+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
77014+ with name "chroot_deny_fchdir" is created.
77015+
77016+config GRKERNSEC_CHROOT_MKNOD
77017+ bool "Deny mknod"
77018+ default y if GRKERNSEC_CONFIG_AUTO
77019+ depends on GRKERNSEC_CHROOT
77020+ help
77021+ If you say Y here, processes inside a chroot will not be allowed to
77022+ mknod. The problem with using mknod inside a chroot is that it
77023+ would allow an attacker to create a device entry that is the same
77024+ as one on the physical root of your system, which could range from
77025+ anything from the console device to a device for your harddrive (which
77026+ they could then use to wipe the drive or steal data). It is recommended
77027+ that you say Y here, unless you run into software incompatibilities.
77028+ If the sysctl option is enabled, a sysctl option with name
77029+ "chroot_deny_mknod" is created.
77030+
77031+config GRKERNSEC_CHROOT_SHMAT
77032+ bool "Deny shmat() out of chroot"
77033+ default y if GRKERNSEC_CONFIG_AUTO
77034+ depends on GRKERNSEC_CHROOT
77035+ help
77036+ If you say Y here, processes inside a chroot will not be able to attach
77037+ to shared memory segments that were created outside of the chroot jail.
77038+ It is recommended that you say Y here. If the sysctl option is enabled,
77039+ a sysctl option with name "chroot_deny_shmat" is created.
77040+
77041+config GRKERNSEC_CHROOT_UNIX
77042+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
77043+ default y if GRKERNSEC_CONFIG_AUTO
77044+ depends on GRKERNSEC_CHROOT
77045+ help
77046+ If you say Y here, processes inside a chroot will not be able to
77047+ connect to abstract (meaning not belonging to a filesystem) Unix
77048+ domain sockets that were bound outside of a chroot. It is recommended
77049+ that you say Y here. If the sysctl option is enabled, a sysctl option
77050+ with name "chroot_deny_unix" is created.
77051+
77052+config GRKERNSEC_CHROOT_FINDTASK
77053+ bool "Protect outside processes"
77054+ default y if GRKERNSEC_CONFIG_AUTO
77055+ depends on GRKERNSEC_CHROOT
77056+ help
77057+ If you say Y here, processes inside a chroot will not be able to
77058+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
77059+ getsid, or view any process outside of the chroot. If the sysctl
77060+ option is enabled, a sysctl option with name "chroot_findtask" is
77061+ created.
77062+
77063+config GRKERNSEC_CHROOT_NICE
77064+ bool "Restrict priority changes"
77065+ default y if GRKERNSEC_CONFIG_AUTO
77066+ depends on GRKERNSEC_CHROOT
77067+ help
77068+ If you say Y here, processes inside a chroot will not be able to raise
77069+ the priority of processes in the chroot, or alter the priority of
77070+ processes outside the chroot. This provides more security than simply
77071+ removing CAP_SYS_NICE from the process' capability set. If the
77072+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
77073+ is created.
77074+
77075+config GRKERNSEC_CHROOT_SYSCTL
77076+ bool "Deny sysctl writes"
77077+ default y if GRKERNSEC_CONFIG_AUTO
77078+ depends on GRKERNSEC_CHROOT
77079+ help
77080+ If you say Y here, an attacker in a chroot will not be able to
77081+ write to sysctl entries, either by sysctl(2) or through a /proc
77082+ interface. It is strongly recommended that you say Y here. If the
77083+ sysctl option is enabled, a sysctl option with name
77084+ "chroot_deny_sysctl" is created.
77085+
77086+config GRKERNSEC_CHROOT_RENAME
77087+ bool "Deny bad renames"
77088+ default y if GRKERNSEC_CONFIG_AUTO
77089+ depends on GRKERNSEC_CHROOT
77090+ help
77091+ If you say Y here, an attacker in a chroot will not be able to
77092+ abuse the ability to create double chroots to break out of the
77093+ chroot by exploiting a race condition between a rename of a directory
77094+ within a chroot against an open of a symlink with relative path
77095+ components. This feature will likewise prevent an accomplice outside
77096+ a chroot from enabling a user inside the chroot to break out and make
77097+ use of their credentials on the global filesystem. Enabling this
77098+ feature is essential to prevent root users from breaking out of a
77099+ chroot. If the sysctl option is enabled, a sysctl option with name
77100+ "chroot_deny_bad_rename" is created.
77101+
77102+config GRKERNSEC_CHROOT_CAPS
77103+ bool "Capability restrictions"
77104+ default y if GRKERNSEC_CONFIG_AUTO
77105+ depends on GRKERNSEC_CHROOT
77106+ help
77107+ If you say Y here, the capabilities on all processes within a
77108+ chroot jail will be lowered to stop module insertion, raw i/o,
77109+ system and net admin tasks, rebooting the system, modifying immutable
77110+ files, modifying IPC owned by another, and changing the system time.
77111+ This is left an option because it can break some apps. Disable this
77112+ if your chrooted apps are having problems performing those kinds of
77113+ tasks. If the sysctl option is enabled, a sysctl option with
77114+ name "chroot_caps" is created.
77115+
77116+config GRKERNSEC_CHROOT_INITRD
77117+ bool "Exempt initrd tasks from restrictions"
77118+ default y if GRKERNSEC_CONFIG_AUTO
77119+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
77120+ help
77121+ If you say Y here, tasks started prior to init will be exempted from
77122+ grsecurity's chroot restrictions. This option is mainly meant to
77123+ resolve Plymouth's performing privileged operations unnecessarily
77124+ in a chroot.
77125+
77126+endmenu
77127+menu "Kernel Auditing"
77128+depends on GRKERNSEC
77129+
77130+config GRKERNSEC_AUDIT_GROUP
77131+ bool "Single group for auditing"
77132+ help
77133+ If you say Y here, the exec and chdir logging features will only operate
77134+ on a group you specify. This option is recommended if you only want to
77135+ watch certain users instead of having a large amount of logs from the
77136+ entire system. If the sysctl option is enabled, a sysctl option with
77137+ name "audit_group" is created.
77138+
77139+config GRKERNSEC_AUDIT_GID
77140+ int "GID for auditing"
77141+ depends on GRKERNSEC_AUDIT_GROUP
77142+ default 1007
77143+
77144+config GRKERNSEC_EXECLOG
77145+ bool "Exec logging"
77146+ help
77147+ If you say Y here, all execve() calls will be logged (since the
77148+ other exec*() calls are frontends to execve(), all execution
77149+ will be logged). Useful for shell-servers that like to keep track
77150+ of their users. If the sysctl option is enabled, a sysctl option with
77151+ name "exec_logging" is created.
77152+ WARNING: This option when enabled will produce a LOT of logs, especially
77153+ on an active system.
77154+
77155+config GRKERNSEC_RESLOG
77156+ bool "Resource logging"
77157+ default y if GRKERNSEC_CONFIG_AUTO
77158+ help
77159+ If you say Y here, all attempts to overstep resource limits will
77160+ be logged with the resource name, the requested size, and the current
77161+ limit. It is highly recommended that you say Y here. If the sysctl
77162+ option is enabled, a sysctl option with name "resource_logging" is
77163+ created. If the RBAC system is enabled, the sysctl value is ignored.
77164+
77165+config GRKERNSEC_CHROOT_EXECLOG
77166+ bool "Log execs within chroot"
77167+ help
77168+ If you say Y here, all executions inside a chroot jail will be logged
77169+ to syslog. This can cause a large amount of logs if certain
77170+ applications (eg. djb's daemontools) are installed on the system, and
77171+ is therefore left as an option. If the sysctl option is enabled, a
77172+ sysctl option with name "chroot_execlog" is created.
77173+
77174+config GRKERNSEC_AUDIT_PTRACE
77175+ bool "Ptrace logging"
77176+ help
77177+ If you say Y here, all attempts to attach to a process via ptrace
77178+ will be logged. If the sysctl option is enabled, a sysctl option
77179+ with name "audit_ptrace" is created.
77180+
77181+config GRKERNSEC_AUDIT_CHDIR
77182+ bool "Chdir logging"
77183+ help
77184+ If you say Y here, all chdir() calls will be logged. If the sysctl
77185+ option is enabled, a sysctl option with name "audit_chdir" is created.
77186+
77187+config GRKERNSEC_AUDIT_MOUNT
77188+ bool "(Un)Mount logging"
77189+ help
77190+ If you say Y here, all mounts and unmounts will be logged. If the
77191+ sysctl option is enabled, a sysctl option with name "audit_mount" is
77192+ created.
77193+
77194+config GRKERNSEC_SIGNAL
77195+ bool "Signal logging"
77196+ default y if GRKERNSEC_CONFIG_AUTO
77197+ help
77198+ If you say Y here, certain important signals will be logged, such as
77199+ SIGSEGV, which will as a result inform you of when a error in a program
77200+ occurred, which in some cases could mean a possible exploit attempt.
77201+ If the sysctl option is enabled, a sysctl option with name
77202+ "signal_logging" is created.
77203+
77204+config GRKERNSEC_FORKFAIL
77205+ bool "Fork failure logging"
77206+ help
77207+ If you say Y here, all failed fork() attempts will be logged.
77208+ This could suggest a fork bomb, or someone attempting to overstep
77209+ their process limit. If the sysctl option is enabled, a sysctl option
77210+ with name "forkfail_logging" is created.
77211+
77212+config GRKERNSEC_TIME
77213+ bool "Time change logging"
77214+ default y if GRKERNSEC_CONFIG_AUTO
77215+ help
77216+ If you say Y here, any changes of the system clock will be logged.
77217+ If the sysctl option is enabled, a sysctl option with name
77218+ "timechange_logging" is created.
77219+
77220+config GRKERNSEC_PROC_IPADDR
77221+ bool "/proc/<pid>/ipaddr support"
77222+ default y if GRKERNSEC_CONFIG_AUTO
77223+ help
77224+ If you say Y here, a new entry will be added to each /proc/<pid>
77225+ directory that contains the IP address of the person using the task.
77226+ The IP is carried across local TCP and AF_UNIX stream sockets.
77227+ This information can be useful for IDS/IPSes to perform remote response
77228+ to a local attack. The entry is readable by only the owner of the
77229+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
77230+ the RBAC system), and thus does not create privacy concerns.
77231+
77232+config GRKERNSEC_RWXMAP_LOG
77233+ bool 'Denied RWX mmap/mprotect logging'
77234+ default y if GRKERNSEC_CONFIG_AUTO
77235+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
77236+ help
77237+ If you say Y here, calls to mmap() and mprotect() with explicit
77238+ usage of PROT_WRITE and PROT_EXEC together will be logged when
77239+ denied by the PAX_MPROTECT feature. This feature will also
77240+ log other problematic scenarios that can occur when PAX_MPROTECT
77241+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
77242+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
77243+ is created.
77244+
77245+endmenu
77246+
77247+menu "Executable Protections"
77248+depends on GRKERNSEC
77249+
77250+config GRKERNSEC_DMESG
77251+ bool "Dmesg(8) restriction"
77252+ default y if GRKERNSEC_CONFIG_AUTO
77253+ help
77254+ If you say Y here, non-root users will not be able to use dmesg(8)
77255+ to view the contents of the kernel's circular log buffer.
77256+ The kernel's log buffer often contains kernel addresses and other
77257+ identifying information useful to an attacker in fingerprinting a
77258+ system for a targeted exploit.
77259+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
77260+ created.
77261+
77262+config GRKERNSEC_HARDEN_PTRACE
77263+ bool "Deter ptrace-based process snooping"
77264+ default y if GRKERNSEC_CONFIG_AUTO
77265+ help
77266+ If you say Y here, TTY sniffers and other malicious monitoring
77267+ programs implemented through ptrace will be defeated. If you
77268+ have been using the RBAC system, this option has already been
77269+ enabled for several years for all users, with the ability to make
77270+ fine-grained exceptions.
77271+
77272+ This option only affects the ability of non-root users to ptrace
77273+ processes that are not a descendent of the ptracing process.
77274+ This means that strace ./binary and gdb ./binary will still work,
77275+ but attaching to arbitrary processes will not. If the sysctl
77276+ option is enabled, a sysctl option with name "harden_ptrace" is
77277+ created.
77278+
77279+config GRKERNSEC_PTRACE_READEXEC
77280+ bool "Require read access to ptrace sensitive binaries"
77281+ default y if GRKERNSEC_CONFIG_AUTO
77282+ help
77283+ If you say Y here, unprivileged users will not be able to ptrace unreadable
77284+ binaries. This option is useful in environments that
77285+ remove the read bits (e.g. file mode 4711) from suid binaries to
77286+ prevent infoleaking of their contents. This option adds
77287+ consistency to the use of that file mode, as the binary could normally
77288+ be read out when run without privileges while ptracing.
77289+
77290+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
77291+ is created.
77292+
77293+config GRKERNSEC_SETXID
77294+ bool "Enforce consistent multithreaded privileges"
77295+ default y if GRKERNSEC_CONFIG_AUTO
77296+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
77297+ help
77298+ If you say Y here, a change from a root uid to a non-root uid
77299+ in a multithreaded application will cause the resulting uids,
77300+ gids, supplementary groups, and capabilities in that thread
77301+ to be propagated to the other threads of the process. In most
77302+ cases this is unnecessary, as glibc will emulate this behavior
77303+ on behalf of the application. Other libcs do not act in the
77304+ same way, allowing the other threads of the process to continue
77305+ running with root privileges. If the sysctl option is enabled,
77306+ a sysctl option with name "consistent_setxid" is created.
77307+
77308+config GRKERNSEC_HARDEN_IPC
77309+ bool "Disallow access to overly-permissive IPC objects"
77310+ default y if GRKERNSEC_CONFIG_AUTO
77311+ depends on SYSVIPC
77312+ help
77313+ If you say Y here, access to overly-permissive IPC objects (shared
77314+ memory, message queues, and semaphores) will be denied for processes
77315+ given the following criteria beyond normal permission checks:
77316+ 1) If the IPC object is world-accessible and the euid doesn't match
77317+ that of the creator or current uid for the IPC object
77318+ 2) If the IPC object is group-accessible and the egid doesn't
77319+ match that of the creator or current gid for the IPC object
77320+ It's a common error to grant too much permission to these objects,
77321+ with impact ranging from denial of service and information leaking to
77322+ privilege escalation. This feature was developed in response to
77323+ research by Tim Brown:
77324+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
77325+ who found hundreds of such insecure usages. Processes with
77326+ CAP_IPC_OWNER are still permitted to access these IPC objects.
77327+ If the sysctl option is enabled, a sysctl option with name
77328+ "harden_ipc" is created.
77329+
77330+config GRKERNSEC_TPE
77331+ bool "Trusted Path Execution (TPE)"
77332+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
77333+ help
77334+ If you say Y here, you will be able to choose a gid to add to the
77335+ supplementary groups of users you want to mark as "untrusted."
77336+ These users will not be able to execute any files that are not in
77337+ root-owned directories writable only by root. If the sysctl option
77338+ is enabled, a sysctl option with name "tpe" is created.
77339+
77340+config GRKERNSEC_TPE_ALL
77341+ bool "Partially restrict all non-root users"
77342+ depends on GRKERNSEC_TPE
77343+ help
77344+ If you say Y here, all non-root users will be covered under
77345+ a weaker TPE restriction. This is separate from, and in addition to,
77346+ the main TPE options that you have selected elsewhere. Thus, if a
77347+ "trusted" GID is chosen, this restriction applies to even that GID.
77348+ Under this restriction, all non-root users will only be allowed to
77349+ execute files in directories they own that are not group or
77350+ world-writable, or in directories owned by root and writable only by
77351+ root. If the sysctl option is enabled, a sysctl option with name
77352+ "tpe_restrict_all" is created.
77353+
77354+config GRKERNSEC_TPE_INVERT
77355+ bool "Invert GID option"
77356+ depends on GRKERNSEC_TPE
77357+ help
77358+ If you say Y here, the group you specify in the TPE configuration will
77359+ decide what group TPE restrictions will be *disabled* for. This
77360+ option is useful if you want TPE restrictions to be applied to most
77361+ users on the system. If the sysctl option is enabled, a sysctl option
77362+ with name "tpe_invert" is created. Unlike other sysctl options, this
77363+ entry will default to on for backward-compatibility.
77364+
77365+config GRKERNSEC_TPE_GID
77366+ int
77367+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
77368+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
77369+
77370+config GRKERNSEC_TPE_UNTRUSTED_GID
77371+ int "GID for TPE-untrusted users"
77372+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
77373+ default 1005
77374+ help
77375+ Setting this GID determines what group TPE restrictions will be
77376+ *enabled* for. If the sysctl option is enabled, a sysctl option
77377+ with name "tpe_gid" is created.
77378+
77379+config GRKERNSEC_TPE_TRUSTED_GID
77380+ int "GID for TPE-trusted users"
77381+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
77382+ default 1005
77383+ help
77384+ Setting this GID determines what group TPE restrictions will be
77385+ *disabled* for. If the sysctl option is enabled, a sysctl option
77386+ with name "tpe_gid" is created.
77387+
77388+endmenu
77389+menu "Network Protections"
77390+depends on GRKERNSEC
77391+
77392+config GRKERNSEC_BLACKHOLE
77393+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
77394+ default y if GRKERNSEC_CONFIG_AUTO
77395+ depends on NET
77396+ help
77397+ If you say Y here, neither TCP resets nor ICMP
77398+ destination-unreachable packets will be sent in response to packets
77399+ sent to ports for which no associated listening process exists.
77400+ It will also prevent the sending of ICMP protocol unreachable packets
77401+ in response to packets with unknown protocols.
77402+ This feature supports both IPV4 and IPV6 and exempts the
77403+ loopback interface from blackholing. Enabling this feature
77404+ makes a host more resilient to DoS attacks and reduces network
77405+ visibility against scanners.
77406+
77407+ The blackhole feature as-implemented is equivalent to the FreeBSD
77408+ blackhole feature, as it prevents RST responses to all packets, not
77409+ just SYNs. Under most application behavior this causes no
77410+ problems, but applications (like haproxy) may not close certain
77411+ connections in a way that cleanly terminates them on the remote
77412+ end, leaving the remote host in LAST_ACK state. Because of this
77413+ side-effect and to prevent intentional LAST_ACK DoSes, this
77414+ feature also adds automatic mitigation against such attacks.
77415+ The mitigation drastically reduces the amount of time a socket
77416+ can spend in LAST_ACK state. If you're using haproxy and not
77417+ all servers it connects to have this option enabled, consider
77418+ disabling this feature on the haproxy host.
77419+
77420+ If the sysctl option is enabled, two sysctl options with names
77421+ "ip_blackhole" and "lastack_retries" will be created.
77422+ While "ip_blackhole" takes the standard zero/non-zero on/off
77423+ toggle, "lastack_retries" uses the same kinds of values as
77424+ "tcp_retries1" and "tcp_retries2". The default value of 4
77425+ prevents a socket from lasting more than 45 seconds in LAST_ACK
77426+ state.
77427+
77428+config GRKERNSEC_NO_SIMULT_CONNECT
77429+ bool "Disable TCP Simultaneous Connect"
77430+ default y if GRKERNSEC_CONFIG_AUTO
77431+ depends on NET
77432+ help
77433+ If you say Y here, a feature by Willy Tarreau will be enabled that
77434+ removes a weakness in Linux's strict implementation of TCP that
77435+ allows two clients to connect to each other without either entering
77436+ a listening state. The weakness allows an attacker to easily prevent
77437+ a client from connecting to a known server provided the source port
77438+ for the connection is guessed correctly.
77439+
77440+ As the weakness could be used to prevent an antivirus or IPS from
77441+ fetching updates, or prevent an SSL gateway from fetching a CRL,
77442+ it should be eliminated by enabling this option. Though Linux is
77443+ one of few operating systems supporting simultaneous connect, it
77444+ has no legitimate use in practice and is rarely supported by firewalls.
77445+
77446+config GRKERNSEC_SOCKET
77447+ bool "Socket restrictions"
77448+ depends on NET
77449+ help
77450+ If you say Y here, you will be able to choose from several options.
77451+ If you assign a GID on your system and add it to the supplementary
77452+ groups of users you want to restrict socket access to, this patch
77453+ will perform up to three things, based on the option(s) you choose.
77454+
77455+config GRKERNSEC_SOCKET_ALL
77456+ bool "Deny any sockets to group"
77457+ depends on GRKERNSEC_SOCKET
77458+ help
77459+ If you say Y here, you will be able to choose a GID of whose users will
77460+ be unable to connect to other hosts from your machine or run server
77461+ applications from your machine. If the sysctl option is enabled, a
77462+ sysctl option with name "socket_all" is created.
77463+
77464+config GRKERNSEC_SOCKET_ALL_GID
77465+ int "GID to deny all sockets for"
77466+ depends on GRKERNSEC_SOCKET_ALL
77467+ default 1004
77468+ help
77469+ Here you can choose the GID to disable socket access for. Remember to
77470+ add the users you want socket access disabled for to the GID
77471+ specified here. If the sysctl option is enabled, a sysctl option
77472+ with name "socket_all_gid" is created.
77473+
77474+config GRKERNSEC_SOCKET_CLIENT
77475+ bool "Deny client sockets to group"
77476+ depends on GRKERNSEC_SOCKET
77477+ help
77478+ If you say Y here, you will be able to choose a GID of whose users will
77479+ be unable to connect to other hosts from your machine, but will be
77480+ able to run servers. If this option is enabled, all users in the group
77481+ you specify will have to use passive mode when initiating ftp transfers
77482+ from the shell on your machine. If the sysctl option is enabled, a
77483+ sysctl option with name "socket_client" is created.
77484+
77485+config GRKERNSEC_SOCKET_CLIENT_GID
77486+ int "GID to deny client sockets for"
77487+ depends on GRKERNSEC_SOCKET_CLIENT
77488+ default 1003
77489+ help
77490+ Here you can choose the GID to disable client socket access for.
77491+ Remember to add the users you want client socket access disabled for to
77492+ the GID specified here. If the sysctl option is enabled, a sysctl
77493+ option with name "socket_client_gid" is created.
77494+
77495+config GRKERNSEC_SOCKET_SERVER
77496+ bool "Deny server sockets to group"
77497+ depends on GRKERNSEC_SOCKET
77498+ help
77499+ If you say Y here, you will be able to choose a GID of whose users will
77500+ be unable to run server applications from your machine. If the sysctl
77501+ option is enabled, a sysctl option with name "socket_server" is created.
77502+
77503+config GRKERNSEC_SOCKET_SERVER_GID
77504+ int "GID to deny server sockets for"
77505+ depends on GRKERNSEC_SOCKET_SERVER
77506+ default 1002
77507+ help
77508+ Here you can choose the GID to disable server socket access for.
77509+ Remember to add the users you want server socket access disabled for to
77510+ the GID specified here. If the sysctl option is enabled, a sysctl
77511+ option with name "socket_server_gid" is created.
77512+
77513+endmenu
77514+
77515+menu "Physical Protections"
77516+depends on GRKERNSEC
77517+
77518+config GRKERNSEC_DENYUSB
77519+ bool "Deny new USB connections after toggle"
77520+ default y if GRKERNSEC_CONFIG_AUTO
77521+ depends on SYSCTL && USB_SUPPORT
77522+ help
77523+ If you say Y here, a new sysctl option with name "deny_new_usb"
77524+ will be created. Setting its value to 1 will prevent any new
77525+ USB devices from being recognized by the OS. Any attempted USB
77526+ device insertion will be logged. This option is intended to be
77527+ used against custom USB devices designed to exploit vulnerabilities
77528+ in various USB device drivers.
77529+
77530+ For greatest effectiveness, this sysctl should be set after any
77531+ relevant init scripts. This option is safe to enable in distros
77532+ as each user can choose whether or not to toggle the sysctl.
77533+
77534+config GRKERNSEC_DENYUSB_FORCE
77535+ bool "Reject all USB devices not connected at boot"
77536+ select USB
77537+ depends on GRKERNSEC_DENYUSB
77538+ help
77539+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
77540+ that doesn't involve a sysctl entry. This option should only be
77541+ enabled if you're sure you want to deny all new USB connections
77542+ at runtime and don't want to modify init scripts. This should not
77543+ be enabled by distros. It forces the core USB code to be built
77544+ into the kernel image so that all devices connected at boot time
77545+ can be recognized and new USB device connections can be prevented
77546+ prior to init running.
77547+
77548+endmenu
77549+
77550+menu "Sysctl Support"
77551+depends on GRKERNSEC && SYSCTL
77552+
77553+config GRKERNSEC_SYSCTL
77554+ bool "Sysctl support"
77555+ default y if GRKERNSEC_CONFIG_AUTO
77556+ help
77557+ If you say Y here, you will be able to change the options that
77558+ grsecurity runs with at bootup, without having to recompile your
77559+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
77560+ to enable (1) or disable (0) various features. All the sysctl entries
77561+ are mutable until the "grsec_lock" entry is set to a non-zero value.
77562+ All features enabled in the kernel configuration are disabled at boot
77563+ if you do not say Y to the "Turn on features by default" option.
77564+ All options should be set at startup, and the grsec_lock entry should
77565+ be set to a non-zero value after all the options are set.
77566+ *THIS IS EXTREMELY IMPORTANT*
77567+
77568+config GRKERNSEC_SYSCTL_DISTRO
77569+ bool "Extra sysctl support for distro makers (READ HELP)"
77570+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
77571+ help
77572+ If you say Y here, additional sysctl options will be created
77573+ for features that affect processes running as root. Therefore,
77574+ it is critical when using this option that the grsec_lock entry be
77575+ enabled after boot. Only distros with prebuilt kernel packages
77576+ with this option enabled that can ensure grsec_lock is enabled
77577+ after boot should use this option.
77578+ *Failure to set grsec_lock after boot makes all grsec features
77579+ this option covers useless*
77580+
77581+ Currently this option creates the following sysctl entries:
77582+ "Disable Privileged I/O": "disable_priv_io"
77583+
77584+config GRKERNSEC_SYSCTL_ON
77585+ bool "Turn on features by default"
77586+ default y if GRKERNSEC_CONFIG_AUTO
77587+ depends on GRKERNSEC_SYSCTL
77588+ help
77589+ If you say Y here, instead of having all features enabled in the
77590+ kernel configuration disabled at boot time, the features will be
77591+ enabled at boot time. It is recommended you say Y here unless
77592+ there is some reason you would want all sysctl-tunable features to
77593+ be disabled by default. As mentioned elsewhere, it is important
77594+ to enable the grsec_lock entry once you have finished modifying
77595+ the sysctl entries.
77596+
77597+endmenu
77598+menu "Logging Options"
77599+depends on GRKERNSEC
77600+
77601+config GRKERNSEC_FLOODTIME
77602+ int "Seconds in between log messages (minimum)"
77603+ default 10
77604+ help
77605+ This option allows you to enforce the number of seconds between
77606+ grsecurity log messages. The default should be suitable for most
77607+ people, however, if you choose to change it, choose a value small enough
77608+ to allow informative logs to be produced, but large enough to
77609+ prevent flooding.
77610+
77611+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
77612+ any rate limiting on grsecurity log messages.
77613+
77614+config GRKERNSEC_FLOODBURST
77615+ int "Number of messages in a burst (maximum)"
77616+ default 6
77617+ help
77618+ This option allows you to choose the maximum number of messages allowed
77619+ within the flood time interval you chose in a separate option. The
77620+ default should be suitable for most people, however if you find that
77621+ many of your logs are being interpreted as flooding, you may want to
77622+ raise this value.
77623+
77624+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
77625+ any rate limiting on grsecurity log messages.
77626+
77627+endmenu
77628diff --git a/grsecurity/Makefile b/grsecurity/Makefile
77629new file mode 100644
77630index 0000000..30ababb
77631--- /dev/null
77632+++ b/grsecurity/Makefile
77633@@ -0,0 +1,54 @@
77634+# grsecurity – access control and security hardening for Linux
77635+# All code in this directory and various hooks located throughout the Linux kernel are
77636+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
77637+# http://www.grsecurity.net spender@grsecurity.net
77638+#
77639+# This program is free software; you can redistribute it and/or
77640+# modify it under the terms of the GNU General Public License version 2
77641+# as published by the Free Software Foundation.
77642+#
77643+# This program is distributed in the hope that it will be useful,
77644+# but WITHOUT ANY WARRANTY; without even the implied warranty of
77645+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
77646+# GNU General Public License for more details.
77647+#
77648+# You should have received a copy of the GNU General Public License
77649+# along with this program; if not, write to the Free Software
77650+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
77651+
77652+KBUILD_CFLAGS += -Werror
77653+
77654+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
77655+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
77656+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
77657+ grsec_usb.o grsec_ipc.o grsec_proc.o
77658+
77659+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
77660+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
77661+ gracl_learn.o grsec_log.o gracl_policy.o
77662+ifdef CONFIG_COMPAT
77663+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
77664+endif
77665+
77666+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
77667+
77668+ifdef CONFIG_NET
77669+obj-y += grsec_sock.o
77670+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
77671+endif
77672+
77673+ifndef CONFIG_GRKERNSEC
77674+obj-y += grsec_disabled.o
77675+endif
77676+
77677+ifdef CONFIG_GRKERNSEC_HIDESYM
77678+extra-y := grsec_hidesym.o
77679+$(obj)/grsec_hidesym.o:
77680+ @-chmod -f 500 /boot
77681+ @-chmod -f 500 /lib/modules
77682+ @-chmod -f 500 /lib64/modules
77683+ @-chmod -f 500 /lib32/modules
77684+ @-chmod -f 700 .
77685+ @-chmod -f 700 $(objtree)
77686+ @echo ' grsec: protected kernel image paths'
77687+endif
77688diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
77689new file mode 100644
77690index 0000000..811af1f
77691--- /dev/null
77692+++ b/grsecurity/gracl.c
77693@@ -0,0 +1,2749 @@
77694+#include <linux/kernel.h>
77695+#include <linux/module.h>
77696+#include <linux/sched.h>
77697+#include <linux/mm.h>
77698+#include <linux/file.h>
77699+#include <linux/fs.h>
77700+#include <linux/namei.h>
77701+#include <linux/mount.h>
77702+#include <linux/tty.h>
77703+#include <linux/proc_fs.h>
77704+#include <linux/lglock.h>
77705+#include <linux/slab.h>
77706+#include <linux/vmalloc.h>
77707+#include <linux/types.h>
77708+#include <linux/sysctl.h>
77709+#include <linux/netdevice.h>
77710+#include <linux/ptrace.h>
77711+#include <linux/gracl.h>
77712+#include <linux/gralloc.h>
77713+#include <linux/security.h>
77714+#include <linux/grinternal.h>
77715+#include <linux/pid_namespace.h>
77716+#include <linux/stop_machine.h>
77717+#include <linux/fdtable.h>
77718+#include <linux/percpu.h>
77719+#include <linux/lglock.h>
77720+#include <linux/hugetlb.h>
77721+#include <linux/posix-timers.h>
77722+#include <linux/prefetch.h>
77723+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
77724+#include <linux/magic.h>
77725+#include <linux/pagemap.h>
77726+#include "../fs/btrfs/async-thread.h"
77727+#include "../fs/btrfs/ctree.h"
77728+#include "../fs/btrfs/btrfs_inode.h"
77729+#endif
77730+#include "../fs/mount.h"
77731+
77732+#include <asm/uaccess.h>
77733+#include <asm/errno.h>
77734+#include <asm/mman.h>
77735+
77736+#define FOR_EACH_ROLE_START(role) \
77737+ role = running_polstate.role_list; \
77738+ while (role) {
77739+
77740+#define FOR_EACH_ROLE_END(role) \
77741+ role = role->prev; \
77742+ }
77743+
77744+extern struct path gr_real_root;
77745+
77746+static struct gr_policy_state running_polstate;
77747+struct gr_policy_state *polstate = &running_polstate;
77748+extern struct gr_alloc_state *current_alloc_state;
77749+
77750+extern char *gr_shared_page[4];
77751+DEFINE_RWLOCK(gr_inode_lock);
77752+
77753+static unsigned int gr_status __read_only = GR_STATUS_INIT;
77754+
77755+#ifdef CONFIG_NET
77756+extern struct vfsmount *sock_mnt;
77757+#endif
77758+
77759+extern struct vfsmount *pipe_mnt;
77760+extern struct vfsmount *shm_mnt;
77761+
77762+#ifdef CONFIG_HUGETLBFS
77763+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
77764+#endif
77765+
77766+extern u16 acl_sp_role_value;
77767+extern struct acl_object_label *fakefs_obj_rw;
77768+extern struct acl_object_label *fakefs_obj_rwx;
77769+
77770+int gr_acl_is_enabled(void)
77771+{
77772+ return (gr_status & GR_READY);
77773+}
77774+
77775+void gr_enable_rbac_system(void)
77776+{
77777+ pax_open_kernel();
77778+ gr_status |= GR_READY;
77779+ pax_close_kernel();
77780+}
77781+
77782+int gr_rbac_disable(void *unused)
77783+{
77784+ pax_open_kernel();
77785+ gr_status &= ~GR_READY;
77786+ pax_close_kernel();
77787+
77788+ return 0;
77789+}
77790+
77791+static inline dev_t __get_dev(const struct dentry *dentry)
77792+{
77793+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
77794+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
77795+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
77796+ else
77797+#endif
77798+ return dentry->d_sb->s_dev;
77799+}
77800+
77801+static inline u64 __get_ino(const struct dentry *dentry)
77802+{
77803+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
77804+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
77805+ return btrfs_ino(dentry->d_inode);
77806+ else
77807+#endif
77808+ return dentry->d_inode->i_ino;
77809+}
77810+
77811+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
77812+{
77813+ return __get_dev(dentry);
77814+}
77815+
77816+u64 gr_get_ino_from_dentry(struct dentry *dentry)
77817+{
77818+ return __get_ino(dentry);
77819+}
77820+
77821+static char gr_task_roletype_to_char(struct task_struct *task)
77822+{
77823+ switch (task->role->roletype &
77824+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
77825+ GR_ROLE_SPECIAL)) {
77826+ case GR_ROLE_DEFAULT:
77827+ return 'D';
77828+ case GR_ROLE_USER:
77829+ return 'U';
77830+ case GR_ROLE_GROUP:
77831+ return 'G';
77832+ case GR_ROLE_SPECIAL:
77833+ return 'S';
77834+ }
77835+
77836+ return 'X';
77837+}
77838+
77839+char gr_roletype_to_char(void)
77840+{
77841+ return gr_task_roletype_to_char(current);
77842+}
77843+
77844+int
77845+gr_acl_tpe_check(void)
77846+{
77847+ if (unlikely(!(gr_status & GR_READY)))
77848+ return 0;
77849+ if (current->role->roletype & GR_ROLE_TPE)
77850+ return 1;
77851+ else
77852+ return 0;
77853+}
77854+
77855+int
77856+gr_handle_rawio(const struct inode *inode)
77857+{
77858+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77859+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
77860+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
77861+ !capable(CAP_SYS_RAWIO))
77862+ return 1;
77863+#endif
77864+ return 0;
77865+}
77866+
77867+int
77868+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
77869+{
77870+ if (likely(lena != lenb))
77871+ return 0;
77872+
77873+ return !memcmp(a, b, lena);
77874+}
77875+
77876+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
77877+{
77878+ *buflen -= namelen;
77879+ if (*buflen < 0)
77880+ return -ENAMETOOLONG;
77881+ *buffer -= namelen;
77882+ memcpy(*buffer, str, namelen);
77883+ return 0;
77884+}
77885+
77886+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
77887+{
77888+ return prepend(buffer, buflen, name->name, name->len);
77889+}
77890+
77891+static int prepend_path(const struct path *path, struct path *root,
77892+ char **buffer, int *buflen)
77893+{
77894+ struct dentry *dentry = path->dentry;
77895+ struct vfsmount *vfsmnt = path->mnt;
77896+ struct mount *mnt = real_mount(vfsmnt);
77897+ bool slash = false;
77898+ int error = 0;
77899+
77900+ while (dentry != root->dentry || vfsmnt != root->mnt) {
77901+ struct dentry * parent;
77902+
77903+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
77904+ /* Global root? */
77905+ if (!mnt_has_parent(mnt)) {
77906+ goto out;
77907+ }
77908+ dentry = mnt->mnt_mountpoint;
77909+ mnt = mnt->mnt_parent;
77910+ vfsmnt = &mnt->mnt;
77911+ continue;
77912+ }
77913+ parent = dentry->d_parent;
77914+ prefetch(parent);
77915+ spin_lock(&dentry->d_lock);
77916+ error = prepend_name(buffer, buflen, &dentry->d_name);
77917+ spin_unlock(&dentry->d_lock);
77918+ if (!error)
77919+ error = prepend(buffer, buflen, "/", 1);
77920+ if (error)
77921+ break;
77922+
77923+ slash = true;
77924+ dentry = parent;
77925+ }
77926+
77927+out:
77928+ if (!error && !slash)
77929+ error = prepend(buffer, buflen, "/", 1);
77930+
77931+ return error;
77932+}
77933+
77934+/* this must be called with mount_lock and rename_lock held */
77935+
77936+static char *__our_d_path(const struct path *path, struct path *root,
77937+ char *buf, int buflen)
77938+{
77939+ char *res = buf + buflen;
77940+ int error;
77941+
77942+ prepend(&res, &buflen, "\0", 1);
77943+ error = prepend_path(path, root, &res, &buflen);
77944+ if (error)
77945+ return ERR_PTR(error);
77946+
77947+ return res;
77948+}
77949+
77950+static char *
77951+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
77952+{
77953+ char *retval;
77954+
77955+ retval = __our_d_path(path, root, buf, buflen);
77956+ if (unlikely(IS_ERR(retval)))
77957+ retval = strcpy(buf, "<path too long>");
77958+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
77959+ retval[1] = '\0';
77960+
77961+ return retval;
77962+}
77963+
77964+static char *
77965+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
77966+ char *buf, int buflen)
77967+{
77968+ struct path path;
77969+ char *res;
77970+
77971+ path.dentry = (struct dentry *)dentry;
77972+ path.mnt = (struct vfsmount *)vfsmnt;
77973+
77974+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
77975+ by the RBAC system */
77976+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
77977+
77978+ return res;
77979+}
77980+
77981+static char *
77982+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
77983+ char *buf, int buflen)
77984+{
77985+ char *res;
77986+ struct path path;
77987+ struct path root;
77988+ struct task_struct *reaper = init_pid_ns.child_reaper;
77989+
77990+ path.dentry = (struct dentry *)dentry;
77991+ path.mnt = (struct vfsmount *)vfsmnt;
77992+
77993+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
77994+ get_fs_root(reaper->fs, &root);
77995+
77996+ read_seqlock_excl(&mount_lock);
77997+ write_seqlock(&rename_lock);
77998+ res = gen_full_path(&path, &root, buf, buflen);
77999+ write_sequnlock(&rename_lock);
78000+ read_sequnlock_excl(&mount_lock);
78001+
78002+ path_put(&root);
78003+ return res;
78004+}
78005+
78006+char *
78007+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
78008+{
78009+ char *ret;
78010+ read_seqlock_excl(&mount_lock);
78011+ write_seqlock(&rename_lock);
78012+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
78013+ PAGE_SIZE);
78014+ write_sequnlock(&rename_lock);
78015+ read_sequnlock_excl(&mount_lock);
78016+ return ret;
78017+}
78018+
78019+static char *
78020+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
78021+{
78022+ char *ret;
78023+ char *buf;
78024+ int buflen;
78025+
78026+ read_seqlock_excl(&mount_lock);
78027+ write_seqlock(&rename_lock);
78028+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
78029+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
78030+ buflen = (int)(ret - buf);
78031+ if (buflen >= 5)
78032+ prepend(&ret, &buflen, "/proc", 5);
78033+ else
78034+ ret = strcpy(buf, "<path too long>");
78035+ write_sequnlock(&rename_lock);
78036+ read_sequnlock_excl(&mount_lock);
78037+ return ret;
78038+}
78039+
78040+char *
78041+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
78042+{
78043+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
78044+ PAGE_SIZE);
78045+}
78046+
78047+char *
78048+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
78049+{
78050+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
78051+ PAGE_SIZE);
78052+}
78053+
78054+char *
78055+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
78056+{
78057+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
78058+ PAGE_SIZE);
78059+}
78060+
78061+char *
78062+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
78063+{
78064+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
78065+ PAGE_SIZE);
78066+}
78067+
78068+char *
78069+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
78070+{
78071+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
78072+ PAGE_SIZE);
78073+}
78074+
78075+__u32
78076+to_gr_audit(const __u32 reqmode)
78077+{
78078+ /* masks off auditable permission flags, then shifts them to create
78079+ auditing flags, and adds the special case of append auditing if
78080+ we're requesting write */
78081+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
78082+}
78083+
78084+struct acl_role_label *
78085+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
78086+ const gid_t gid)
78087+{
78088+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
78089+ struct acl_role_label *match;
78090+ struct role_allowed_ip *ipp;
78091+ unsigned int x;
78092+ u32 curr_ip = task->signal->saved_ip;
78093+
78094+ match = state->acl_role_set.r_hash[index];
78095+
78096+ while (match) {
78097+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
78098+ for (x = 0; x < match->domain_child_num; x++) {
78099+ if (match->domain_children[x] == uid)
78100+ goto found;
78101+ }
78102+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
78103+ break;
78104+ match = match->next;
78105+ }
78106+found:
78107+ if (match == NULL) {
78108+ try_group:
78109+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
78110+ match = state->acl_role_set.r_hash[index];
78111+
78112+ while (match) {
78113+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
78114+ for (x = 0; x < match->domain_child_num; x++) {
78115+ if (match->domain_children[x] == gid)
78116+ goto found2;
78117+ }
78118+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
78119+ break;
78120+ match = match->next;
78121+ }
78122+found2:
78123+ if (match == NULL)
78124+ match = state->default_role;
78125+ if (match->allowed_ips == NULL)
78126+ return match;
78127+ else {
78128+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
78129+ if (likely
78130+ ((ntohl(curr_ip) & ipp->netmask) ==
78131+ (ntohl(ipp->addr) & ipp->netmask)))
78132+ return match;
78133+ }
78134+ match = state->default_role;
78135+ }
78136+ } else if (match->allowed_ips == NULL) {
78137+ return match;
78138+ } else {
78139+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
78140+ if (likely
78141+ ((ntohl(curr_ip) & ipp->netmask) ==
78142+ (ntohl(ipp->addr) & ipp->netmask)))
78143+ return match;
78144+ }
78145+ goto try_group;
78146+ }
78147+
78148+ return match;
78149+}
78150+
78151+static struct acl_role_label *
78152+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
78153+ const gid_t gid)
78154+{
78155+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
78156+}
78157+
78158+struct acl_subject_label *
78159+lookup_acl_subj_label(const u64 ino, const dev_t dev,
78160+ const struct acl_role_label *role)
78161+{
78162+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
78163+ struct acl_subject_label *match;
78164+
78165+ match = role->subj_hash[index];
78166+
78167+ while (match && (match->inode != ino || match->device != dev ||
78168+ (match->mode & GR_DELETED))) {
78169+ match = match->next;
78170+ }
78171+
78172+ if (match && !(match->mode & GR_DELETED))
78173+ return match;
78174+ else
78175+ return NULL;
78176+}
78177+
78178+struct acl_subject_label *
78179+lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev,
78180+ const struct acl_role_label *role)
78181+{
78182+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
78183+ struct acl_subject_label *match;
78184+
78185+ match = role->subj_hash[index];
78186+
78187+ while (match && (match->inode != ino || match->device != dev ||
78188+ !(match->mode & GR_DELETED))) {
78189+ match = match->next;
78190+ }
78191+
78192+ if (match && (match->mode & GR_DELETED))
78193+ return match;
78194+ else
78195+ return NULL;
78196+}
78197+
78198+static struct acl_object_label *
78199+lookup_acl_obj_label(const u64 ino, const dev_t dev,
78200+ const struct acl_subject_label *subj)
78201+{
78202+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
78203+ struct acl_object_label *match;
78204+
78205+ match = subj->obj_hash[index];
78206+
78207+ while (match && (match->inode != ino || match->device != dev ||
78208+ (match->mode & GR_DELETED))) {
78209+ match = match->next;
78210+ }
78211+
78212+ if (match && !(match->mode & GR_DELETED))
78213+ return match;
78214+ else
78215+ return NULL;
78216+}
78217+
78218+static struct acl_object_label *
78219+lookup_acl_obj_label_create(const u64 ino, const dev_t dev,
78220+ const struct acl_subject_label *subj)
78221+{
78222+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
78223+ struct acl_object_label *match;
78224+
78225+ match = subj->obj_hash[index];
78226+
78227+ while (match && (match->inode != ino || match->device != dev ||
78228+ !(match->mode & GR_DELETED))) {
78229+ match = match->next;
78230+ }
78231+
78232+ if (match && (match->mode & GR_DELETED))
78233+ return match;
78234+
78235+ match = subj->obj_hash[index];
78236+
78237+ while (match && (match->inode != ino || match->device != dev ||
78238+ (match->mode & GR_DELETED))) {
78239+ match = match->next;
78240+ }
78241+
78242+ if (match && !(match->mode & GR_DELETED))
78243+ return match;
78244+ else
78245+ return NULL;
78246+}
78247+
78248+struct name_entry *
78249+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
78250+{
78251+ unsigned int len = strlen(name);
78252+ unsigned int key = full_name_hash(name, len);
78253+ unsigned int index = key % state->name_set.n_size;
78254+ struct name_entry *match;
78255+
78256+ match = state->name_set.n_hash[index];
78257+
78258+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
78259+ match = match->next;
78260+
78261+ return match;
78262+}
78263+
78264+static struct name_entry *
78265+lookup_name_entry(const char *name)
78266+{
78267+ return __lookup_name_entry(&running_polstate, name);
78268+}
78269+
78270+static struct name_entry *
78271+lookup_name_entry_create(const char *name)
78272+{
78273+ unsigned int len = strlen(name);
78274+ unsigned int key = full_name_hash(name, len);
78275+ unsigned int index = key % running_polstate.name_set.n_size;
78276+ struct name_entry *match;
78277+
78278+ match = running_polstate.name_set.n_hash[index];
78279+
78280+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
78281+ !match->deleted))
78282+ match = match->next;
78283+
78284+ if (match && match->deleted)
78285+ return match;
78286+
78287+ match = running_polstate.name_set.n_hash[index];
78288+
78289+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
78290+ match->deleted))
78291+ match = match->next;
78292+
78293+ if (match && !match->deleted)
78294+ return match;
78295+ else
78296+ return NULL;
78297+}
78298+
78299+static struct inodev_entry *
78300+lookup_inodev_entry(const u64 ino, const dev_t dev)
78301+{
78302+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
78303+ struct inodev_entry *match;
78304+
78305+ match = running_polstate.inodev_set.i_hash[index];
78306+
78307+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
78308+ match = match->next;
78309+
78310+ return match;
78311+}
78312+
78313+void
78314+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
78315+{
78316+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
78317+ state->inodev_set.i_size);
78318+ struct inodev_entry **curr;
78319+
78320+ entry->prev = NULL;
78321+
78322+ curr = &state->inodev_set.i_hash[index];
78323+ if (*curr != NULL)
78324+ (*curr)->prev = entry;
78325+
78326+ entry->next = *curr;
78327+ *curr = entry;
78328+
78329+ return;
78330+}
78331+
78332+static void
78333+insert_inodev_entry(struct inodev_entry *entry)
78334+{
78335+ __insert_inodev_entry(&running_polstate, entry);
78336+}
78337+
78338+void
78339+insert_acl_obj_label(struct acl_object_label *obj,
78340+ struct acl_subject_label *subj)
78341+{
78342+ unsigned int index =
78343+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
78344+ struct acl_object_label **curr;
78345+
78346+ obj->prev = NULL;
78347+
78348+ curr = &subj->obj_hash[index];
78349+ if (*curr != NULL)
78350+ (*curr)->prev = obj;
78351+
78352+ obj->next = *curr;
78353+ *curr = obj;
78354+
78355+ return;
78356+}
78357+
78358+void
78359+insert_acl_subj_label(struct acl_subject_label *obj,
78360+ struct acl_role_label *role)
78361+{
78362+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
78363+ struct acl_subject_label **curr;
78364+
78365+ obj->prev = NULL;
78366+
78367+ curr = &role->subj_hash[index];
78368+ if (*curr != NULL)
78369+ (*curr)->prev = obj;
78370+
78371+ obj->next = *curr;
78372+ *curr = obj;
78373+
78374+ return;
78375+}
78376+
78377+/* derived from glibc fnmatch() 0: match, 1: no match*/
78378+
78379+static int
78380+glob_match(const char *p, const char *n)
78381+{
78382+ char c;
78383+
78384+ while ((c = *p++) != '\0') {
78385+ switch (c) {
78386+ case '?':
78387+ if (*n == '\0')
78388+ return 1;
78389+ else if (*n == '/')
78390+ return 1;
78391+ break;
78392+ case '\\':
78393+ if (*n != c)
78394+ return 1;
78395+ break;
78396+ case '*':
78397+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
78398+ if (*n == '/')
78399+ return 1;
78400+ else if (c == '?') {
78401+ if (*n == '\0')
78402+ return 1;
78403+ else
78404+ ++n;
78405+ }
78406+ }
78407+ if (c == '\0') {
78408+ return 0;
78409+ } else {
78410+ const char *endp;
78411+
78412+ if ((endp = strchr(n, '/')) == NULL)
78413+ endp = n + strlen(n);
78414+
78415+ if (c == '[') {
78416+ for (--p; n < endp; ++n)
78417+ if (!glob_match(p, n))
78418+ return 0;
78419+ } else if (c == '/') {
78420+ while (*n != '\0' && *n != '/')
78421+ ++n;
78422+ if (*n == '/' && !glob_match(p, n + 1))
78423+ return 0;
78424+ } else {
78425+ for (--p; n < endp; ++n)
78426+ if (*n == c && !glob_match(p, n))
78427+ return 0;
78428+ }
78429+
78430+ return 1;
78431+ }
78432+ case '[':
78433+ {
78434+ int not;
78435+ char cold;
78436+
78437+ if (*n == '\0' || *n == '/')
78438+ return 1;
78439+
78440+ not = (*p == '!' || *p == '^');
78441+ if (not)
78442+ ++p;
78443+
78444+ c = *p++;
78445+ for (;;) {
78446+ unsigned char fn = (unsigned char)*n;
78447+
78448+ if (c == '\0')
78449+ return 1;
78450+ else {
78451+ if (c == fn)
78452+ goto matched;
78453+ cold = c;
78454+ c = *p++;
78455+
78456+ if (c == '-' && *p != ']') {
78457+ unsigned char cend = *p++;
78458+
78459+ if (cend == '\0')
78460+ return 1;
78461+
78462+ if (cold <= fn && fn <= cend)
78463+ goto matched;
78464+
78465+ c = *p++;
78466+ }
78467+ }
78468+
78469+ if (c == ']')
78470+ break;
78471+ }
78472+ if (!not)
78473+ return 1;
78474+ break;
78475+ matched:
78476+ while (c != ']') {
78477+ if (c == '\0')
78478+ return 1;
78479+
78480+ c = *p++;
78481+ }
78482+ if (not)
78483+ return 1;
78484+ }
78485+ break;
78486+ default:
78487+ if (c != *n)
78488+ return 1;
78489+ }
78490+
78491+ ++n;
78492+ }
78493+
78494+ if (*n == '\0')
78495+ return 0;
78496+
78497+ if (*n == '/')
78498+ return 0;
78499+
78500+ return 1;
78501+}
78502+
78503+static struct acl_object_label *
78504+chk_glob_label(struct acl_object_label *globbed,
78505+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
78506+{
78507+ struct acl_object_label *tmp;
78508+
78509+ if (*path == NULL)
78510+ *path = gr_to_filename_nolock(dentry, mnt);
78511+
78512+ tmp = globbed;
78513+
78514+ while (tmp) {
78515+ if (!glob_match(tmp->filename, *path))
78516+ return tmp;
78517+ tmp = tmp->next;
78518+ }
78519+
78520+ return NULL;
78521+}
78522+
78523+static struct acl_object_label *
78524+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
78525+ const u64 curr_ino, const dev_t curr_dev,
78526+ const struct acl_subject_label *subj, char **path, const int checkglob)
78527+{
78528+ struct acl_subject_label *tmpsubj;
78529+ struct acl_object_label *retval;
78530+ struct acl_object_label *retval2;
78531+
78532+ tmpsubj = (struct acl_subject_label *) subj;
78533+ read_lock(&gr_inode_lock);
78534+ do {
78535+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
78536+ if (retval) {
78537+ if (checkglob && retval->globbed) {
78538+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
78539+ if (retval2)
78540+ retval = retval2;
78541+ }
78542+ break;
78543+ }
78544+ } while ((tmpsubj = tmpsubj->parent_subject));
78545+ read_unlock(&gr_inode_lock);
78546+
78547+ return retval;
78548+}
78549+
78550+static struct acl_object_label *
78551+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
78552+ struct dentry *curr_dentry,
78553+ const struct acl_subject_label *subj, char **path, const int checkglob)
78554+{
78555+ int newglob = checkglob;
78556+ u64 inode;
78557+ dev_t device;
78558+
78559+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
78560+ as we don't want a / * rule to match instead of the / object
78561+ don't do this for create lookups that call this function though, since they're looking up
78562+ on the parent and thus need globbing checks on all paths
78563+ */
78564+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
78565+ newglob = GR_NO_GLOB;
78566+
78567+ spin_lock(&curr_dentry->d_lock);
78568+ inode = __get_ino(curr_dentry);
78569+ device = __get_dev(curr_dentry);
78570+ spin_unlock(&curr_dentry->d_lock);
78571+
78572+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
78573+}
78574+
78575+#ifdef CONFIG_HUGETLBFS
78576+static inline bool
78577+is_hugetlbfs_mnt(const struct vfsmount *mnt)
78578+{
78579+ int i;
78580+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
78581+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
78582+ return true;
78583+ }
78584+
78585+ return false;
78586+}
78587+#endif
78588+
78589+static struct acl_object_label *
78590+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
78591+ const struct acl_subject_label *subj, char *path, const int checkglob)
78592+{
78593+ struct dentry *dentry = (struct dentry *) l_dentry;
78594+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
78595+ struct mount *real_mnt = real_mount(mnt);
78596+ struct acl_object_label *retval;
78597+ struct dentry *parent;
78598+
78599+ read_seqlock_excl(&mount_lock);
78600+ write_seqlock(&rename_lock);
78601+
78602+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
78603+#ifdef CONFIG_NET
78604+ mnt == sock_mnt ||
78605+#endif
78606+#ifdef CONFIG_HUGETLBFS
78607+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
78608+#endif
78609+ /* ignore Eric Biederman */
78610+ IS_PRIVATE(l_dentry->d_inode))) {
78611+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
78612+ goto out;
78613+ }
78614+
78615+ for (;;) {
78616+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
78617+ break;
78618+
78619+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
78620+ if (!mnt_has_parent(real_mnt))
78621+ break;
78622+
78623+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
78624+ if (retval != NULL)
78625+ goto out;
78626+
78627+ dentry = real_mnt->mnt_mountpoint;
78628+ real_mnt = real_mnt->mnt_parent;
78629+ mnt = &real_mnt->mnt;
78630+ continue;
78631+ }
78632+
78633+ parent = dentry->d_parent;
78634+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
78635+ if (retval != NULL)
78636+ goto out;
78637+
78638+ dentry = parent;
78639+ }
78640+
78641+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
78642+
78643+ /* gr_real_root is pinned so we don't have to hold a reference */
78644+ if (retval == NULL)
78645+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
78646+out:
78647+ write_sequnlock(&rename_lock);
78648+ read_sequnlock_excl(&mount_lock);
78649+
78650+ BUG_ON(retval == NULL);
78651+
78652+ return retval;
78653+}
78654+
78655+static struct acl_object_label *
78656+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
78657+ const struct acl_subject_label *subj)
78658+{
78659+ char *path = NULL;
78660+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
78661+}
78662+
78663+static struct acl_object_label *
78664+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
78665+ const struct acl_subject_label *subj)
78666+{
78667+ char *path = NULL;
78668+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
78669+}
78670+
78671+static struct acl_object_label *
78672+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
78673+ const struct acl_subject_label *subj, char *path)
78674+{
78675+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
78676+}
78677+
78678+struct acl_subject_label *
78679+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
78680+ const struct acl_role_label *role)
78681+{
78682+ struct dentry *dentry = (struct dentry *) l_dentry;
78683+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
78684+ struct mount *real_mnt = real_mount(mnt);
78685+ struct acl_subject_label *retval;
78686+ struct dentry *parent;
78687+
78688+ read_seqlock_excl(&mount_lock);
78689+ write_seqlock(&rename_lock);
78690+
78691+ for (;;) {
78692+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
78693+ break;
78694+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
78695+ if (!mnt_has_parent(real_mnt))
78696+ break;
78697+
78698+ spin_lock(&dentry->d_lock);
78699+ read_lock(&gr_inode_lock);
78700+ retval =
78701+ lookup_acl_subj_label(__get_ino(dentry),
78702+ __get_dev(dentry), role);
78703+ read_unlock(&gr_inode_lock);
78704+ spin_unlock(&dentry->d_lock);
78705+ if (retval != NULL)
78706+ goto out;
78707+
78708+ dentry = real_mnt->mnt_mountpoint;
78709+ real_mnt = real_mnt->mnt_parent;
78710+ mnt = &real_mnt->mnt;
78711+ continue;
78712+ }
78713+
78714+ spin_lock(&dentry->d_lock);
78715+ read_lock(&gr_inode_lock);
78716+ retval = lookup_acl_subj_label(__get_ino(dentry),
78717+ __get_dev(dentry), role);
78718+ read_unlock(&gr_inode_lock);
78719+ parent = dentry->d_parent;
78720+ spin_unlock(&dentry->d_lock);
78721+
78722+ if (retval != NULL)
78723+ goto out;
78724+
78725+ dentry = parent;
78726+ }
78727+
78728+ spin_lock(&dentry->d_lock);
78729+ read_lock(&gr_inode_lock);
78730+ retval = lookup_acl_subj_label(__get_ino(dentry),
78731+ __get_dev(dentry), role);
78732+ read_unlock(&gr_inode_lock);
78733+ spin_unlock(&dentry->d_lock);
78734+
78735+ if (unlikely(retval == NULL)) {
78736+ /* gr_real_root is pinned, we don't need to hold a reference */
78737+ read_lock(&gr_inode_lock);
78738+ retval = lookup_acl_subj_label(__get_ino(gr_real_root.dentry),
78739+ __get_dev(gr_real_root.dentry), role);
78740+ read_unlock(&gr_inode_lock);
78741+ }
78742+out:
78743+ write_sequnlock(&rename_lock);
78744+ read_sequnlock_excl(&mount_lock);
78745+
78746+ BUG_ON(retval == NULL);
78747+
78748+ return retval;
78749+}
78750+
78751+void
78752+assign_special_role(const char *rolename)
78753+{
78754+ struct acl_object_label *obj;
78755+ struct acl_role_label *r;
78756+ struct acl_role_label *assigned = NULL;
78757+ struct task_struct *tsk;
78758+ struct file *filp;
78759+
78760+ FOR_EACH_ROLE_START(r)
78761+ if (!strcmp(rolename, r->rolename) &&
78762+ (r->roletype & GR_ROLE_SPECIAL)) {
78763+ assigned = r;
78764+ break;
78765+ }
78766+ FOR_EACH_ROLE_END(r)
78767+
78768+ if (!assigned)
78769+ return;
78770+
78771+ read_lock(&tasklist_lock);
78772+ read_lock(&grsec_exec_file_lock);
78773+
78774+ tsk = current->real_parent;
78775+ if (tsk == NULL)
78776+ goto out_unlock;
78777+
78778+ filp = tsk->exec_file;
78779+ if (filp == NULL)
78780+ goto out_unlock;
78781+
78782+ tsk->is_writable = 0;
78783+ tsk->inherited = 0;
78784+
78785+ tsk->acl_sp_role = 1;
78786+ tsk->acl_role_id = ++acl_sp_role_value;
78787+ tsk->role = assigned;
78788+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
78789+
78790+ /* ignore additional mmap checks for processes that are writable
78791+ by the default ACL */
78792+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
78793+ if (unlikely(obj->mode & GR_WRITE))
78794+ tsk->is_writable = 1;
78795+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
78796+ if (unlikely(obj->mode & GR_WRITE))
78797+ tsk->is_writable = 1;
78798+
78799+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
78800+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
78801+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
78802+#endif
78803+
78804+out_unlock:
78805+ read_unlock(&grsec_exec_file_lock);
78806+ read_unlock(&tasklist_lock);
78807+ return;
78808+}
78809+
78810+
78811+static void
78812+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
78813+{
78814+ struct task_struct *task = current;
78815+ const struct cred *cred = current_cred();
78816+
78817+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
78818+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
78819+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
78820+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
78821+
78822+ return;
78823+}
78824+
78825+static void
78826+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
78827+{
78828+ struct task_struct *task = current;
78829+ const struct cred *cred = current_cred();
78830+
78831+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
78832+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
78833+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
78834+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
78835+
78836+ return;
78837+}
78838+
78839+static void
78840+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
78841+{
78842+ struct task_struct *task = current;
78843+ const struct cred *cred = current_cred();
78844+
78845+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
78846+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
78847+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
78848+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
78849+
78850+ return;
78851+}
78852+
78853+static void
78854+gr_set_proc_res(struct task_struct *task)
78855+{
78856+ struct acl_subject_label *proc;
78857+ unsigned short i;
78858+
78859+ proc = task->acl;
78860+
78861+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
78862+ return;
78863+
78864+ for (i = 0; i < RLIM_NLIMITS; i++) {
78865+ unsigned long rlim_cur, rlim_max;
78866+
78867+ if (!(proc->resmask & (1U << i)))
78868+ continue;
78869+
78870+ rlim_cur = proc->res[i].rlim_cur;
78871+ rlim_max = proc->res[i].rlim_max;
78872+
78873+ if (i == RLIMIT_NOFILE) {
78874+ unsigned long saved_sysctl_nr_open = sysctl_nr_open;
78875+ if (rlim_cur > saved_sysctl_nr_open)
78876+ rlim_cur = saved_sysctl_nr_open;
78877+ if (rlim_max > saved_sysctl_nr_open)
78878+ rlim_max = saved_sysctl_nr_open;
78879+ }
78880+
78881+ task->signal->rlim[i].rlim_cur = rlim_cur;
78882+ task->signal->rlim[i].rlim_max = rlim_max;
78883+
78884+ if (i == RLIMIT_CPU)
78885+ update_rlimit_cpu(task, rlim_cur);
78886+ }
78887+
78888+ return;
78889+}
78890+
78891+/* both of the below must be called with
78892+ rcu_read_lock();
78893+ read_lock(&tasklist_lock);
78894+ read_lock(&grsec_exec_file_lock);
78895+ except in the case of gr_set_role_label() (for __gr_get_subject_for_task)
78896+*/
78897+
78898+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback)
78899+{
78900+ char *tmpname;
78901+ struct acl_subject_label *tmpsubj;
78902+ struct file *filp;
78903+ struct name_entry *nmatch;
78904+
78905+ filp = task->exec_file;
78906+ if (filp == NULL)
78907+ return NULL;
78908+
78909+ /* the following is to apply the correct subject
78910+ on binaries running when the RBAC system
78911+ is enabled, when the binaries have been
78912+ replaced or deleted since their execution
78913+ -----
78914+ when the RBAC system starts, the inode/dev
78915+ from exec_file will be one the RBAC system
78916+ is unaware of. It only knows the inode/dev
78917+ of the present file on disk, or the absence
78918+ of it.
78919+ */
78920+
78921+ if (filename)
78922+ nmatch = __lookup_name_entry(state, filename);
78923+ else {
78924+ preempt_disable();
78925+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
78926+
78927+ nmatch = __lookup_name_entry(state, tmpname);
78928+ preempt_enable();
78929+ }
78930+ tmpsubj = NULL;
78931+ if (nmatch) {
78932+ if (nmatch->deleted)
78933+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
78934+ else
78935+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
78936+ }
78937+ /* this also works for the reload case -- if we don't match a potentially inherited subject
78938+ then we fall back to a normal lookup based on the binary's ino/dev
78939+ */
78940+ if (tmpsubj == NULL && fallback)
78941+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
78942+
78943+ return tmpsubj;
78944+}
78945+
78946+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename, int fallback)
78947+{
78948+ return __gr_get_subject_for_task(&running_polstate, task, filename, fallback);
78949+}
78950+
78951+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
78952+{
78953+ struct acl_object_label *obj;
78954+ struct file *filp;
78955+
78956+ filp = task->exec_file;
78957+
78958+ task->acl = subj;
78959+ task->is_writable = 0;
78960+ /* ignore additional mmap checks for processes that are writable
78961+ by the default ACL */
78962+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
78963+ if (unlikely(obj->mode & GR_WRITE))
78964+ task->is_writable = 1;
78965+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
78966+ if (unlikely(obj->mode & GR_WRITE))
78967+ task->is_writable = 1;
78968+
78969+ gr_set_proc_res(task);
78970+
78971+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
78972+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
78973+#endif
78974+}
78975+
78976+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
78977+{
78978+ __gr_apply_subject_to_task(&running_polstate, task, subj);
78979+}
78980+
78981+__u32
78982+gr_search_file(const struct dentry * dentry, const __u32 mode,
78983+ const struct vfsmount * mnt)
78984+{
78985+ __u32 retval = mode;
78986+ struct acl_subject_label *curracl;
78987+ struct acl_object_label *currobj;
78988+
78989+ if (unlikely(!(gr_status & GR_READY)))
78990+ return (mode & ~GR_AUDITS);
78991+
78992+ curracl = current->acl;
78993+
78994+ currobj = chk_obj_label(dentry, mnt, curracl);
78995+ retval = currobj->mode & mode;
78996+
78997+ /* if we're opening a specified transfer file for writing
78998+ (e.g. /dev/initctl), then transfer our role to init
78999+ */
79000+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
79001+ current->role->roletype & GR_ROLE_PERSIST)) {
79002+ struct task_struct *task = init_pid_ns.child_reaper;
79003+
79004+ if (task->role != current->role) {
79005+ struct acl_subject_label *subj;
79006+
79007+ task->acl_sp_role = 0;
79008+ task->acl_role_id = current->acl_role_id;
79009+ task->role = current->role;
79010+ rcu_read_lock();
79011+ read_lock(&grsec_exec_file_lock);
79012+ subj = gr_get_subject_for_task(task, NULL, 1);
79013+ gr_apply_subject_to_task(task, subj);
79014+ read_unlock(&grsec_exec_file_lock);
79015+ rcu_read_unlock();
79016+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
79017+ }
79018+ }
79019+
79020+ if (unlikely
79021+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
79022+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
79023+ __u32 new_mode = mode;
79024+
79025+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
79026+
79027+ retval = new_mode;
79028+
79029+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
79030+ new_mode |= GR_INHERIT;
79031+
79032+ if (!(mode & GR_NOLEARN))
79033+ gr_log_learn(dentry, mnt, new_mode);
79034+ }
79035+
79036+ return retval;
79037+}
79038+
79039+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
79040+ const struct dentry *parent,
79041+ const struct vfsmount *mnt)
79042+{
79043+ struct name_entry *match;
79044+ struct acl_object_label *matchpo;
79045+ struct acl_subject_label *curracl;
79046+ char *path;
79047+
79048+ if (unlikely(!(gr_status & GR_READY)))
79049+ return NULL;
79050+
79051+ preempt_disable();
79052+ path = gr_to_filename_rbac(new_dentry, mnt);
79053+ match = lookup_name_entry_create(path);
79054+
79055+ curracl = current->acl;
79056+
79057+ if (match) {
79058+ read_lock(&gr_inode_lock);
79059+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
79060+ read_unlock(&gr_inode_lock);
79061+
79062+ if (matchpo) {
79063+ preempt_enable();
79064+ return matchpo;
79065+ }
79066+ }
79067+
79068+ // lookup parent
79069+
79070+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
79071+
79072+ preempt_enable();
79073+ return matchpo;
79074+}
79075+
79076+__u32
79077+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
79078+ const struct vfsmount * mnt, const __u32 mode)
79079+{
79080+ struct acl_object_label *matchpo;
79081+ __u32 retval;
79082+
79083+ if (unlikely(!(gr_status & GR_READY)))
79084+ return (mode & ~GR_AUDITS);
79085+
79086+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
79087+
79088+ retval = matchpo->mode & mode;
79089+
79090+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
79091+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
79092+ __u32 new_mode = mode;
79093+
79094+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
79095+
79096+ gr_log_learn(new_dentry, mnt, new_mode);
79097+ return new_mode;
79098+ }
79099+
79100+ return retval;
79101+}
79102+
79103+__u32
79104+gr_check_link(const struct dentry * new_dentry,
79105+ const struct dentry * parent_dentry,
79106+ const struct vfsmount * parent_mnt,
79107+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
79108+{
79109+ struct acl_object_label *obj;
79110+ __u32 oldmode, newmode;
79111+ __u32 needmode;
79112+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
79113+ GR_DELETE | GR_INHERIT;
79114+
79115+ if (unlikely(!(gr_status & GR_READY)))
79116+ return (GR_CREATE | GR_LINK);
79117+
79118+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
79119+ oldmode = obj->mode;
79120+
79121+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
79122+ newmode = obj->mode;
79123+
79124+ needmode = newmode & checkmodes;
79125+
79126+ // old name for hardlink must have at least the permissions of the new name
79127+ if ((oldmode & needmode) != needmode)
79128+ goto bad;
79129+
79130+ // if old name had restrictions/auditing, make sure the new name does as well
79131+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
79132+
79133+ // don't allow hardlinking of suid/sgid/fcapped files without permission
79134+ if (is_privileged_binary(old_dentry))
79135+ needmode |= GR_SETID;
79136+
79137+ if ((newmode & needmode) != needmode)
79138+ goto bad;
79139+
79140+ // enforce minimum permissions
79141+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
79142+ return newmode;
79143+bad:
79144+ needmode = oldmode;
79145+ if (is_privileged_binary(old_dentry))
79146+ needmode |= GR_SETID;
79147+
79148+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
79149+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
79150+ return (GR_CREATE | GR_LINK);
79151+ } else if (newmode & GR_SUPPRESS)
79152+ return GR_SUPPRESS;
79153+ else
79154+ return 0;
79155+}
79156+
79157+int
79158+gr_check_hidden_task(const struct task_struct *task)
79159+{
79160+ if (unlikely(!(gr_status & GR_READY)))
79161+ return 0;
79162+
79163+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
79164+ return 1;
79165+
79166+ return 0;
79167+}
79168+
79169+int
79170+gr_check_protected_task(const struct task_struct *task)
79171+{
79172+ if (unlikely(!(gr_status & GR_READY) || !task))
79173+ return 0;
79174+
79175+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
79176+ task->acl != current->acl)
79177+ return 1;
79178+
79179+ return 0;
79180+}
79181+
79182+int
79183+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
79184+{
79185+ struct task_struct *p;
79186+ int ret = 0;
79187+
79188+ if (unlikely(!(gr_status & GR_READY) || !pid))
79189+ return ret;
79190+
79191+ read_lock(&tasklist_lock);
79192+ do_each_pid_task(pid, type, p) {
79193+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
79194+ p->acl != current->acl) {
79195+ ret = 1;
79196+ goto out;
79197+ }
79198+ } while_each_pid_task(pid, type, p);
79199+out:
79200+ read_unlock(&tasklist_lock);
79201+
79202+ return ret;
79203+}
79204+
79205+void
79206+gr_copy_label(struct task_struct *tsk)
79207+{
79208+ struct task_struct *p = current;
79209+
79210+ tsk->inherited = p->inherited;
79211+ tsk->acl_sp_role = 0;
79212+ tsk->acl_role_id = p->acl_role_id;
79213+ tsk->acl = p->acl;
79214+ tsk->role = p->role;
79215+ tsk->signal->used_accept = 0;
79216+ tsk->signal->curr_ip = p->signal->curr_ip;
79217+ tsk->signal->saved_ip = p->signal->saved_ip;
79218+ if (p->exec_file)
79219+ get_file(p->exec_file);
79220+ tsk->exec_file = p->exec_file;
79221+ tsk->is_writable = p->is_writable;
79222+ if (unlikely(p->signal->used_accept)) {
79223+ p->signal->curr_ip = 0;
79224+ p->signal->saved_ip = 0;
79225+ }
79226+
79227+ return;
79228+}
79229+
79230+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
79231+
79232+int
79233+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
79234+{
79235+ unsigned int i;
79236+ __u16 num;
79237+ uid_t *uidlist;
79238+ uid_t curuid;
79239+ int realok = 0;
79240+ int effectiveok = 0;
79241+ int fsok = 0;
79242+ uid_t globalreal, globaleffective, globalfs;
79243+
79244+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
79245+ struct user_struct *user;
79246+
79247+ if (!uid_valid(real))
79248+ goto skipit;
79249+
79250+ /* find user based on global namespace */
79251+
79252+ globalreal = GR_GLOBAL_UID(real);
79253+
79254+ user = find_user(make_kuid(&init_user_ns, globalreal));
79255+ if (user == NULL)
79256+ goto skipit;
79257+
79258+ if (gr_process_kernel_setuid_ban(user)) {
79259+ /* for find_user */
79260+ free_uid(user);
79261+ return 1;
79262+ }
79263+
79264+ /* for find_user */
79265+ free_uid(user);
79266+
79267+skipit:
79268+#endif
79269+
79270+ if (unlikely(!(gr_status & GR_READY)))
79271+ return 0;
79272+
79273+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
79274+ gr_log_learn_uid_change(real, effective, fs);
79275+
79276+ num = current->acl->user_trans_num;
79277+ uidlist = current->acl->user_transitions;
79278+
79279+ if (uidlist == NULL)
79280+ return 0;
79281+
79282+ if (!uid_valid(real)) {
79283+ realok = 1;
79284+ globalreal = (uid_t)-1;
79285+ } else {
79286+ globalreal = GR_GLOBAL_UID(real);
79287+ }
79288+ if (!uid_valid(effective)) {
79289+ effectiveok = 1;
79290+ globaleffective = (uid_t)-1;
79291+ } else {
79292+ globaleffective = GR_GLOBAL_UID(effective);
79293+ }
79294+ if (!uid_valid(fs)) {
79295+ fsok = 1;
79296+ globalfs = (uid_t)-1;
79297+ } else {
79298+ globalfs = GR_GLOBAL_UID(fs);
79299+ }
79300+
79301+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
79302+ for (i = 0; i < num; i++) {
79303+ curuid = uidlist[i];
79304+ if (globalreal == curuid)
79305+ realok = 1;
79306+ if (globaleffective == curuid)
79307+ effectiveok = 1;
79308+ if (globalfs == curuid)
79309+ fsok = 1;
79310+ }
79311+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
79312+ for (i = 0; i < num; i++) {
79313+ curuid = uidlist[i];
79314+ if (globalreal == curuid)
79315+ break;
79316+ if (globaleffective == curuid)
79317+ break;
79318+ if (globalfs == curuid)
79319+ break;
79320+ }
79321+ /* not in deny list */
79322+ if (i == num) {
79323+ realok = 1;
79324+ effectiveok = 1;
79325+ fsok = 1;
79326+ }
79327+ }
79328+
79329+ if (realok && effectiveok && fsok)
79330+ return 0;
79331+ else {
79332+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
79333+ return 1;
79334+ }
79335+}
79336+
79337+int
79338+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
79339+{
79340+ unsigned int i;
79341+ __u16 num;
79342+ gid_t *gidlist;
79343+ gid_t curgid;
79344+ int realok = 0;
79345+ int effectiveok = 0;
79346+ int fsok = 0;
79347+ gid_t globalreal, globaleffective, globalfs;
79348+
79349+ if (unlikely(!(gr_status & GR_READY)))
79350+ return 0;
79351+
79352+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
79353+ gr_log_learn_gid_change(real, effective, fs);
79354+
79355+ num = current->acl->group_trans_num;
79356+ gidlist = current->acl->group_transitions;
79357+
79358+ if (gidlist == NULL)
79359+ return 0;
79360+
79361+ if (!gid_valid(real)) {
79362+ realok = 1;
79363+ globalreal = (gid_t)-1;
79364+ } else {
79365+ globalreal = GR_GLOBAL_GID(real);
79366+ }
79367+ if (!gid_valid(effective)) {
79368+ effectiveok = 1;
79369+ globaleffective = (gid_t)-1;
79370+ } else {
79371+ globaleffective = GR_GLOBAL_GID(effective);
79372+ }
79373+ if (!gid_valid(fs)) {
79374+ fsok = 1;
79375+ globalfs = (gid_t)-1;
79376+ } else {
79377+ globalfs = GR_GLOBAL_GID(fs);
79378+ }
79379+
79380+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
79381+ for (i = 0; i < num; i++) {
79382+ curgid = gidlist[i];
79383+ if (globalreal == curgid)
79384+ realok = 1;
79385+ if (globaleffective == curgid)
79386+ effectiveok = 1;
79387+ if (globalfs == curgid)
79388+ fsok = 1;
79389+ }
79390+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
79391+ for (i = 0; i < num; i++) {
79392+ curgid = gidlist[i];
79393+ if (globalreal == curgid)
79394+ break;
79395+ if (globaleffective == curgid)
79396+ break;
79397+ if (globalfs == curgid)
79398+ break;
79399+ }
79400+ /* not in deny list */
79401+ if (i == num) {
79402+ realok = 1;
79403+ effectiveok = 1;
79404+ fsok = 1;
79405+ }
79406+ }
79407+
79408+ if (realok && effectiveok && fsok)
79409+ return 0;
79410+ else {
79411+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
79412+ return 1;
79413+ }
79414+}
79415+
79416+extern int gr_acl_is_capable(const int cap);
79417+
79418+void
79419+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
79420+{
79421+ struct acl_role_label *role = task->role;
79422+ struct acl_role_label *origrole = role;
79423+ struct acl_subject_label *subj = NULL;
79424+ struct acl_object_label *obj;
79425+ struct file *filp;
79426+ uid_t uid;
79427+ gid_t gid;
79428+
79429+ if (unlikely(!(gr_status & GR_READY)))
79430+ return;
79431+
79432+ uid = GR_GLOBAL_UID(kuid);
79433+ gid = GR_GLOBAL_GID(kgid);
79434+
79435+ filp = task->exec_file;
79436+
79437+ /* kernel process, we'll give them the kernel role */
79438+ if (unlikely(!filp)) {
79439+ task->role = running_polstate.kernel_role;
79440+ task->acl = running_polstate.kernel_role->root_label;
79441+ return;
79442+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
79443+ /* save the current ip at time of role lookup so that the proper
79444+ IP will be learned for role_allowed_ip */
79445+ task->signal->saved_ip = task->signal->curr_ip;
79446+ role = lookup_acl_role_label(task, uid, gid);
79447+ }
79448+
79449+ /* don't change the role if we're not a privileged process */
79450+ if (role && task->role != role &&
79451+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
79452+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
79453+ return;
79454+
79455+ task->role = role;
79456+
79457+ if (task->inherited) {
79458+ /* if we reached our subject through inheritance, then first see
79459+ if there's a subject of the same name in the new role that has
79460+ an object that would result in the same inherited subject
79461+ */
79462+ subj = gr_get_subject_for_task(task, task->acl->filename, 0);
79463+ if (subj) {
79464+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, subj);
79465+ if (!(obj->mode & GR_INHERIT))
79466+ subj = NULL;
79467+ }
79468+
79469+ }
79470+ if (subj == NULL) {
79471+ /* otherwise:
79472+ perform subject lookup in possibly new role
79473+ we can use this result below in the case where role == task->role
79474+ */
79475+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
79476+ }
79477+
79478+ /* if we changed uid/gid, but result in the same role
79479+ and are using inheritance, don't lose the inherited subject
79480+ if current subject is other than what normal lookup
79481+ would result in, we arrived via inheritance, don't
79482+ lose subject
79483+ */
79484+ if (role != origrole || (!(task->acl->mode & GR_INHERITLEARN) &&
79485+ (subj == task->acl)))
79486+ task->acl = subj;
79487+
79488+ /* leave task->inherited unaffected */
79489+
79490+ task->is_writable = 0;
79491+
79492+ /* ignore additional mmap checks for processes that are writable
79493+ by the default ACL */
79494+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
79495+ if (unlikely(obj->mode & GR_WRITE))
79496+ task->is_writable = 1;
79497+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
79498+ if (unlikely(obj->mode & GR_WRITE))
79499+ task->is_writable = 1;
79500+
79501+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
79502+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
79503+#endif
79504+
79505+ gr_set_proc_res(task);
79506+
79507+ return;
79508+}
79509+
79510+int
79511+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
79512+ const int unsafe_flags)
79513+{
79514+ struct task_struct *task = current;
79515+ struct acl_subject_label *newacl;
79516+ struct acl_object_label *obj;
79517+ __u32 retmode;
79518+
79519+ if (unlikely(!(gr_status & GR_READY)))
79520+ return 0;
79521+
79522+ newacl = chk_subj_label(dentry, mnt, task->role);
79523+
79524+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
79525+ did an exec
79526+ */
79527+ rcu_read_lock();
79528+ read_lock(&tasklist_lock);
79529+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
79530+ (task->parent->acl->mode & GR_POVERRIDE))) {
79531+ read_unlock(&tasklist_lock);
79532+ rcu_read_unlock();
79533+ goto skip_check;
79534+ }
79535+ read_unlock(&tasklist_lock);
79536+ rcu_read_unlock();
79537+
79538+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
79539+ !(task->role->roletype & GR_ROLE_GOD) &&
79540+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
79541+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
79542+ if (unsafe_flags & LSM_UNSAFE_SHARE)
79543+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
79544+ else
79545+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
79546+ return -EACCES;
79547+ }
79548+
79549+skip_check:
79550+
79551+ obj = chk_obj_label(dentry, mnt, task->acl);
79552+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
79553+
79554+ if (!(task->acl->mode & GR_INHERITLEARN) &&
79555+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
79556+ if (obj->nested)
79557+ task->acl = obj->nested;
79558+ else
79559+ task->acl = newacl;
79560+ task->inherited = 0;
79561+ } else {
79562+ task->inherited = 1;
79563+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
79564+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
79565+ }
79566+
79567+ task->is_writable = 0;
79568+
79569+ /* ignore additional mmap checks for processes that are writable
79570+ by the default ACL */
79571+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
79572+ if (unlikely(obj->mode & GR_WRITE))
79573+ task->is_writable = 1;
79574+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
79575+ if (unlikely(obj->mode & GR_WRITE))
79576+ task->is_writable = 1;
79577+
79578+ gr_set_proc_res(task);
79579+
79580+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
79581+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
79582+#endif
79583+ return 0;
79584+}
79585+
79586+/* always called with valid inodev ptr */
79587+static void
79588+do_handle_delete(struct inodev_entry *inodev, const u64 ino, const dev_t dev)
79589+{
79590+ struct acl_object_label *matchpo;
79591+ struct acl_subject_label *matchps;
79592+ struct acl_subject_label *subj;
79593+ struct acl_role_label *role;
79594+ unsigned int x;
79595+
79596+ FOR_EACH_ROLE_START(role)
79597+ FOR_EACH_SUBJECT_START(role, subj, x)
79598+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
79599+ matchpo->mode |= GR_DELETED;
79600+ FOR_EACH_SUBJECT_END(subj,x)
79601+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
79602+ /* nested subjects aren't in the role's subj_hash table */
79603+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
79604+ matchpo->mode |= GR_DELETED;
79605+ FOR_EACH_NESTED_SUBJECT_END(subj)
79606+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
79607+ matchps->mode |= GR_DELETED;
79608+ FOR_EACH_ROLE_END(role)
79609+
79610+ inodev->nentry->deleted = 1;
79611+
79612+ return;
79613+}
79614+
79615+void
79616+gr_handle_delete(const u64 ino, const dev_t dev)
79617+{
79618+ struct inodev_entry *inodev;
79619+
79620+ if (unlikely(!(gr_status & GR_READY)))
79621+ return;
79622+
79623+ write_lock(&gr_inode_lock);
79624+ inodev = lookup_inodev_entry(ino, dev);
79625+ if (inodev != NULL)
79626+ do_handle_delete(inodev, ino, dev);
79627+ write_unlock(&gr_inode_lock);
79628+
79629+ return;
79630+}
79631+
79632+static void
79633+update_acl_obj_label(const u64 oldinode, const dev_t olddevice,
79634+ const u64 newinode, const dev_t newdevice,
79635+ struct acl_subject_label *subj)
79636+{
79637+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
79638+ struct acl_object_label *match;
79639+
79640+ match = subj->obj_hash[index];
79641+
79642+ while (match && (match->inode != oldinode ||
79643+ match->device != olddevice ||
79644+ !(match->mode & GR_DELETED)))
79645+ match = match->next;
79646+
79647+ if (match && (match->inode == oldinode)
79648+ && (match->device == olddevice)
79649+ && (match->mode & GR_DELETED)) {
79650+ if (match->prev == NULL) {
79651+ subj->obj_hash[index] = match->next;
79652+ if (match->next != NULL)
79653+ match->next->prev = NULL;
79654+ } else {
79655+ match->prev->next = match->next;
79656+ if (match->next != NULL)
79657+ match->next->prev = match->prev;
79658+ }
79659+ match->prev = NULL;
79660+ match->next = NULL;
79661+ match->inode = newinode;
79662+ match->device = newdevice;
79663+ match->mode &= ~GR_DELETED;
79664+
79665+ insert_acl_obj_label(match, subj);
79666+ }
79667+
79668+ return;
79669+}
79670+
79671+static void
79672+update_acl_subj_label(const u64 oldinode, const dev_t olddevice,
79673+ const u64 newinode, const dev_t newdevice,
79674+ struct acl_role_label *role)
79675+{
79676+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
79677+ struct acl_subject_label *match;
79678+
79679+ match = role->subj_hash[index];
79680+
79681+ while (match && (match->inode != oldinode ||
79682+ match->device != olddevice ||
79683+ !(match->mode & GR_DELETED)))
79684+ match = match->next;
79685+
79686+ if (match && (match->inode == oldinode)
79687+ && (match->device == olddevice)
79688+ && (match->mode & GR_DELETED)) {
79689+ if (match->prev == NULL) {
79690+ role->subj_hash[index] = match->next;
79691+ if (match->next != NULL)
79692+ match->next->prev = NULL;
79693+ } else {
79694+ match->prev->next = match->next;
79695+ if (match->next != NULL)
79696+ match->next->prev = match->prev;
79697+ }
79698+ match->prev = NULL;
79699+ match->next = NULL;
79700+ match->inode = newinode;
79701+ match->device = newdevice;
79702+ match->mode &= ~GR_DELETED;
79703+
79704+ insert_acl_subj_label(match, role);
79705+ }
79706+
79707+ return;
79708+}
79709+
79710+static void
79711+update_inodev_entry(const u64 oldinode, const dev_t olddevice,
79712+ const u64 newinode, const dev_t newdevice)
79713+{
79714+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
79715+ struct inodev_entry *match;
79716+
79717+ match = running_polstate.inodev_set.i_hash[index];
79718+
79719+ while (match && (match->nentry->inode != oldinode ||
79720+ match->nentry->device != olddevice || !match->nentry->deleted))
79721+ match = match->next;
79722+
79723+ if (match && (match->nentry->inode == oldinode)
79724+ && (match->nentry->device == olddevice) &&
79725+ match->nentry->deleted) {
79726+ if (match->prev == NULL) {
79727+ running_polstate.inodev_set.i_hash[index] = match->next;
79728+ if (match->next != NULL)
79729+ match->next->prev = NULL;
79730+ } else {
79731+ match->prev->next = match->next;
79732+ if (match->next != NULL)
79733+ match->next->prev = match->prev;
79734+ }
79735+ match->prev = NULL;
79736+ match->next = NULL;
79737+ match->nentry->inode = newinode;
79738+ match->nentry->device = newdevice;
79739+ match->nentry->deleted = 0;
79740+
79741+ insert_inodev_entry(match);
79742+ }
79743+
79744+ return;
79745+}
79746+
79747+static void
79748+__do_handle_create(const struct name_entry *matchn, u64 ino, dev_t dev)
79749+{
79750+ struct acl_subject_label *subj;
79751+ struct acl_role_label *role;
79752+ unsigned int x;
79753+
79754+ FOR_EACH_ROLE_START(role)
79755+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
79756+
79757+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
79758+ if ((subj->inode == ino) && (subj->device == dev)) {
79759+ subj->inode = ino;
79760+ subj->device = dev;
79761+ }
79762+ /* nested subjects aren't in the role's subj_hash table */
79763+ update_acl_obj_label(matchn->inode, matchn->device,
79764+ ino, dev, subj);
79765+ FOR_EACH_NESTED_SUBJECT_END(subj)
79766+ FOR_EACH_SUBJECT_START(role, subj, x)
79767+ update_acl_obj_label(matchn->inode, matchn->device,
79768+ ino, dev, subj);
79769+ FOR_EACH_SUBJECT_END(subj,x)
79770+ FOR_EACH_ROLE_END(role)
79771+
79772+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
79773+
79774+ return;
79775+}
79776+
79777+static void
79778+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
79779+ const struct vfsmount *mnt)
79780+{
79781+ u64 ino = __get_ino(dentry);
79782+ dev_t dev = __get_dev(dentry);
79783+
79784+ __do_handle_create(matchn, ino, dev);
79785+
79786+ return;
79787+}
79788+
79789+void
79790+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
79791+{
79792+ struct name_entry *matchn;
79793+
79794+ if (unlikely(!(gr_status & GR_READY)))
79795+ return;
79796+
79797+ preempt_disable();
79798+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
79799+
79800+ if (unlikely((unsigned long)matchn)) {
79801+ write_lock(&gr_inode_lock);
79802+ do_handle_create(matchn, dentry, mnt);
79803+ write_unlock(&gr_inode_lock);
79804+ }
79805+ preempt_enable();
79806+
79807+ return;
79808+}
79809+
79810+void
79811+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
79812+{
79813+ struct name_entry *matchn;
79814+
79815+ if (unlikely(!(gr_status & GR_READY)))
79816+ return;
79817+
79818+ preempt_disable();
79819+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
79820+
79821+ if (unlikely((unsigned long)matchn)) {
79822+ write_lock(&gr_inode_lock);
79823+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
79824+ write_unlock(&gr_inode_lock);
79825+ }
79826+ preempt_enable();
79827+
79828+ return;
79829+}
79830+
79831+void
79832+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
79833+ struct dentry *old_dentry,
79834+ struct dentry *new_dentry,
79835+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
79836+{
79837+ struct name_entry *matchn;
79838+ struct name_entry *matchn2 = NULL;
79839+ struct inodev_entry *inodev;
79840+ struct inode *inode = new_dentry->d_inode;
79841+ u64 old_ino = __get_ino(old_dentry);
79842+ dev_t old_dev = __get_dev(old_dentry);
79843+ unsigned int exchange = flags & RENAME_EXCHANGE;
79844+
79845+ /* vfs_rename swaps the name and parent link for old_dentry and
79846+ new_dentry
79847+ at this point, old_dentry has the new name, parent link, and inode
79848+ for the renamed file
79849+ if a file is being replaced by a rename, new_dentry has the inode
79850+ and name for the replaced file
79851+ */
79852+
79853+ if (unlikely(!(gr_status & GR_READY)))
79854+ return;
79855+
79856+ preempt_disable();
79857+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
79858+
79859+ /* exchange cases:
79860+ a filename exists for the source, but not dest
79861+ do a recreate on source
79862+ a filename exists for the dest, but not source
79863+ do a recreate on dest
79864+ a filename exists for both source and dest
79865+ delete source and dest, then create source and dest
79866+ a filename exists for neither source nor dest
79867+ no updates needed
79868+
79869+ the name entry lookups get us the old inode/dev associated with
79870+ each name, so do the deletes first (if possible) so that when
79871+ we do the create, we pick up on the right entries
79872+ */
79873+
79874+ if (exchange)
79875+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
79876+
79877+ /* we wouldn't have to check d_inode if it weren't for
79878+ NFS silly-renaming
79879+ */
79880+
79881+ write_lock(&gr_inode_lock);
79882+ if (unlikely((replace || exchange) && inode)) {
79883+ u64 new_ino = __get_ino(new_dentry);
79884+ dev_t new_dev = __get_dev(new_dentry);
79885+
79886+ inodev = lookup_inodev_entry(new_ino, new_dev);
79887+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
79888+ do_handle_delete(inodev, new_ino, new_dev);
79889+ }
79890+
79891+ inodev = lookup_inodev_entry(old_ino, old_dev);
79892+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
79893+ do_handle_delete(inodev, old_ino, old_dev);
79894+
79895+ if (unlikely(matchn != NULL))
79896+ do_handle_create(matchn, old_dentry, mnt);
79897+
79898+ if (unlikely(matchn2 != NULL))
79899+ do_handle_create(matchn2, new_dentry, mnt);
79900+
79901+ write_unlock(&gr_inode_lock);
79902+ preempt_enable();
79903+
79904+ return;
79905+}
79906+
79907+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
79908+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
79909+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
79910+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
79911+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
79912+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
79913+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
79914+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
79915+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
79916+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
79917+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
79918+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
79919+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
79920+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
79921+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
79922+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
79923+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
79924+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
79925+};
79926+
79927+void
79928+gr_learn_resource(const struct task_struct *task,
79929+ const int res, const unsigned long wanted, const int gt)
79930+{
79931+ struct acl_subject_label *acl;
79932+ const struct cred *cred;
79933+
79934+ if (unlikely((gr_status & GR_READY) &&
79935+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
79936+ goto skip_reslog;
79937+
79938+ gr_log_resource(task, res, wanted, gt);
79939+skip_reslog:
79940+
79941+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
79942+ return;
79943+
79944+ acl = task->acl;
79945+
79946+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
79947+ !(acl->resmask & (1U << (unsigned short) res))))
79948+ return;
79949+
79950+ if (wanted >= acl->res[res].rlim_cur) {
79951+ unsigned long res_add;
79952+
79953+ res_add = wanted + res_learn_bumps[res];
79954+
79955+ acl->res[res].rlim_cur = res_add;
79956+
79957+ if (wanted > acl->res[res].rlim_max)
79958+ acl->res[res].rlim_max = res_add;
79959+
79960+ /* only log the subject filename, since resource logging is supported for
79961+ single-subject learning only */
79962+ rcu_read_lock();
79963+ cred = __task_cred(task);
79964+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
79965+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
79966+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
79967+ "", (unsigned long) res, &task->signal->saved_ip);
79968+ rcu_read_unlock();
79969+ }
79970+
79971+ return;
79972+}
79973+EXPORT_SYMBOL_GPL(gr_learn_resource);
79974+#endif
79975+
79976+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
79977+void
79978+pax_set_initial_flags(struct linux_binprm *bprm)
79979+{
79980+ struct task_struct *task = current;
79981+ struct acl_subject_label *proc;
79982+ unsigned long flags;
79983+
79984+ if (unlikely(!(gr_status & GR_READY)))
79985+ return;
79986+
79987+ flags = pax_get_flags(task);
79988+
79989+ proc = task->acl;
79990+
79991+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
79992+ flags &= ~MF_PAX_PAGEEXEC;
79993+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
79994+ flags &= ~MF_PAX_SEGMEXEC;
79995+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
79996+ flags &= ~MF_PAX_RANDMMAP;
79997+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
79998+ flags &= ~MF_PAX_EMUTRAMP;
79999+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
80000+ flags &= ~MF_PAX_MPROTECT;
80001+
80002+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
80003+ flags |= MF_PAX_PAGEEXEC;
80004+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
80005+ flags |= MF_PAX_SEGMEXEC;
80006+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
80007+ flags |= MF_PAX_RANDMMAP;
80008+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
80009+ flags |= MF_PAX_EMUTRAMP;
80010+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
80011+ flags |= MF_PAX_MPROTECT;
80012+
80013+ pax_set_flags(task, flags);
80014+
80015+ return;
80016+}
80017+#endif
80018+
80019+int
80020+gr_handle_proc_ptrace(struct task_struct *task)
80021+{
80022+ struct file *filp;
80023+ struct task_struct *tmp = task;
80024+ struct task_struct *curtemp = current;
80025+ __u32 retmode;
80026+
80027+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
80028+ if (unlikely(!(gr_status & GR_READY)))
80029+ return 0;
80030+#endif
80031+
80032+ read_lock(&tasklist_lock);
80033+ read_lock(&grsec_exec_file_lock);
80034+ filp = task->exec_file;
80035+
80036+ while (task_pid_nr(tmp) > 0) {
80037+ if (tmp == curtemp)
80038+ break;
80039+ tmp = tmp->real_parent;
80040+ }
80041+
80042+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
80043+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
80044+ read_unlock(&grsec_exec_file_lock);
80045+ read_unlock(&tasklist_lock);
80046+ return 1;
80047+ }
80048+
80049+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
80050+ if (!(gr_status & GR_READY)) {
80051+ read_unlock(&grsec_exec_file_lock);
80052+ read_unlock(&tasklist_lock);
80053+ return 0;
80054+ }
80055+#endif
80056+
80057+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
80058+ read_unlock(&grsec_exec_file_lock);
80059+ read_unlock(&tasklist_lock);
80060+
80061+ if (retmode & GR_NOPTRACE)
80062+ return 1;
80063+
80064+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
80065+ && (current->acl != task->acl || (current->acl != current->role->root_label
80066+ && task_pid_nr(current) != task_pid_nr(task))))
80067+ return 1;
80068+
80069+ return 0;
80070+}
80071+
80072+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
80073+{
80074+ if (unlikely(!(gr_status & GR_READY)))
80075+ return;
80076+
80077+ if (!(current->role->roletype & GR_ROLE_GOD))
80078+ return;
80079+
80080+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
80081+ p->role->rolename, gr_task_roletype_to_char(p),
80082+ p->acl->filename);
80083+}
80084+
80085+int
80086+gr_handle_ptrace(struct task_struct *task, const long request)
80087+{
80088+ struct task_struct *tmp = task;
80089+ struct task_struct *curtemp = current;
80090+ __u32 retmode;
80091+
80092+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
80093+ if (unlikely(!(gr_status & GR_READY)))
80094+ return 0;
80095+#endif
80096+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
80097+ read_lock(&tasklist_lock);
80098+ while (task_pid_nr(tmp) > 0) {
80099+ if (tmp == curtemp)
80100+ break;
80101+ tmp = tmp->real_parent;
80102+ }
80103+
80104+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
80105+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
80106+ read_unlock(&tasklist_lock);
80107+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
80108+ return 1;
80109+ }
80110+ read_unlock(&tasklist_lock);
80111+ }
80112+
80113+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
80114+ if (!(gr_status & GR_READY))
80115+ return 0;
80116+#endif
80117+
80118+ read_lock(&grsec_exec_file_lock);
80119+ if (unlikely(!task->exec_file)) {
80120+ read_unlock(&grsec_exec_file_lock);
80121+ return 0;
80122+ }
80123+
80124+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
80125+ read_unlock(&grsec_exec_file_lock);
80126+
80127+ if (retmode & GR_NOPTRACE) {
80128+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
80129+ return 1;
80130+ }
80131+
80132+ if (retmode & GR_PTRACERD) {
80133+ switch (request) {
80134+ case PTRACE_SEIZE:
80135+ case PTRACE_POKETEXT:
80136+ case PTRACE_POKEDATA:
80137+ case PTRACE_POKEUSR:
80138+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
80139+ case PTRACE_SETREGS:
80140+ case PTRACE_SETFPREGS:
80141+#endif
80142+#ifdef CONFIG_X86
80143+ case PTRACE_SETFPXREGS:
80144+#endif
80145+#ifdef CONFIG_ALTIVEC
80146+ case PTRACE_SETVRREGS:
80147+#endif
80148+ return 1;
80149+ default:
80150+ return 0;
80151+ }
80152+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
80153+ !(current->role->roletype & GR_ROLE_GOD) &&
80154+ (current->acl != task->acl)) {
80155+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
80156+ return 1;
80157+ }
80158+
80159+ return 0;
80160+}
80161+
80162+static int is_writable_mmap(const struct file *filp)
80163+{
80164+ struct task_struct *task = current;
80165+ struct acl_object_label *obj, *obj2;
80166+
80167+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
80168+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
80169+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
80170+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
80171+ task->role->root_label);
80172+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
80173+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
80174+ return 1;
80175+ }
80176+ }
80177+ return 0;
80178+}
80179+
80180+int
80181+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
80182+{
80183+ __u32 mode;
80184+
80185+ if (unlikely(!file || !(prot & PROT_EXEC)))
80186+ return 1;
80187+
80188+ if (is_writable_mmap(file))
80189+ return 0;
80190+
80191+ mode =
80192+ gr_search_file(file->f_path.dentry,
80193+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
80194+ file->f_path.mnt);
80195+
80196+ if (!gr_tpe_allow(file))
80197+ return 0;
80198+
80199+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
80200+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
80201+ return 0;
80202+ } else if (unlikely(!(mode & GR_EXEC))) {
80203+ return 0;
80204+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
80205+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
80206+ return 1;
80207+ }
80208+
80209+ return 1;
80210+}
80211+
80212+int
80213+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
80214+{
80215+ __u32 mode;
80216+
80217+ if (unlikely(!file || !(prot & PROT_EXEC)))
80218+ return 1;
80219+
80220+ if (is_writable_mmap(file))
80221+ return 0;
80222+
80223+ mode =
80224+ gr_search_file(file->f_path.dentry,
80225+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
80226+ file->f_path.mnt);
80227+
80228+ if (!gr_tpe_allow(file))
80229+ return 0;
80230+
80231+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
80232+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
80233+ return 0;
80234+ } else if (unlikely(!(mode & GR_EXEC))) {
80235+ return 0;
80236+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
80237+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
80238+ return 1;
80239+ }
80240+
80241+ return 1;
80242+}
80243+
80244+void
80245+gr_acl_handle_psacct(struct task_struct *task, const long code)
80246+{
80247+ unsigned long runtime, cputime;
80248+ cputime_t utime, stime;
80249+ unsigned int wday, cday;
80250+ __u8 whr, chr;
80251+ __u8 wmin, cmin;
80252+ __u8 wsec, csec;
80253+ struct timespec curtime, starttime;
80254+
80255+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
80256+ !(task->acl->mode & GR_PROCACCT)))
80257+ return;
80258+
80259+ curtime = ns_to_timespec(ktime_get_ns());
80260+ starttime = ns_to_timespec(task->start_time);
80261+ runtime = curtime.tv_sec - starttime.tv_sec;
80262+ wday = runtime / (60 * 60 * 24);
80263+ runtime -= wday * (60 * 60 * 24);
80264+ whr = runtime / (60 * 60);
80265+ runtime -= whr * (60 * 60);
80266+ wmin = runtime / 60;
80267+ runtime -= wmin * 60;
80268+ wsec = runtime;
80269+
80270+ task_cputime(task, &utime, &stime);
80271+ cputime = cputime_to_secs(utime + stime);
80272+ cday = cputime / (60 * 60 * 24);
80273+ cputime -= cday * (60 * 60 * 24);
80274+ chr = cputime / (60 * 60);
80275+ cputime -= chr * (60 * 60);
80276+ cmin = cputime / 60;
80277+ cputime -= cmin * 60;
80278+ csec = cputime;
80279+
80280+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
80281+
80282+ return;
80283+}
80284+
80285+#ifdef CONFIG_TASKSTATS
80286+int gr_is_taskstats_denied(int pid)
80287+{
80288+ struct task_struct *task;
80289+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
80290+ const struct cred *cred;
80291+#endif
80292+ int ret = 0;
80293+
80294+ /* restrict taskstats viewing to un-chrooted root users
80295+ who have the 'view' subject flag if the RBAC system is enabled
80296+ */
80297+
80298+ rcu_read_lock();
80299+ read_lock(&tasklist_lock);
80300+ task = find_task_by_vpid(pid);
80301+ if (task) {
80302+#ifdef CONFIG_GRKERNSEC_CHROOT
80303+ if (proc_is_chrooted(task))
80304+ ret = -EACCES;
80305+#endif
80306+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
80307+ cred = __task_cred(task);
80308+#ifdef CONFIG_GRKERNSEC_PROC_USER
80309+ if (gr_is_global_nonroot(cred->uid))
80310+ ret = -EACCES;
80311+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
80312+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
80313+ ret = -EACCES;
80314+#endif
80315+#endif
80316+ if (gr_status & GR_READY) {
80317+ if (!(task->acl->mode & GR_VIEW))
80318+ ret = -EACCES;
80319+ }
80320+ } else
80321+ ret = -ENOENT;
80322+
80323+ read_unlock(&tasklist_lock);
80324+ rcu_read_unlock();
80325+
80326+ return ret;
80327+}
80328+#endif
80329+
80330+/* AUXV entries are filled via a descendant of search_binary_handler
80331+ after we've already applied the subject for the target
80332+*/
80333+int gr_acl_enable_at_secure(void)
80334+{
80335+ if (unlikely(!(gr_status & GR_READY)))
80336+ return 0;
80337+
80338+ if (current->acl->mode & GR_ATSECURE)
80339+ return 1;
80340+
80341+ return 0;
80342+}
80343+
80344+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const u64 ino)
80345+{
80346+ struct task_struct *task = current;
80347+ struct dentry *dentry = file->f_path.dentry;
80348+ struct vfsmount *mnt = file->f_path.mnt;
80349+ struct acl_object_label *obj, *tmp;
80350+ struct acl_subject_label *subj;
80351+ unsigned int bufsize;
80352+ int is_not_root;
80353+ char *path;
80354+ dev_t dev = __get_dev(dentry);
80355+
80356+ if (unlikely(!(gr_status & GR_READY)))
80357+ return 1;
80358+
80359+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
80360+ return 1;
80361+
80362+ /* ignore Eric Biederman */
80363+ if (IS_PRIVATE(dentry->d_inode))
80364+ return 1;
80365+
80366+ subj = task->acl;
80367+ read_lock(&gr_inode_lock);
80368+ do {
80369+ obj = lookup_acl_obj_label(ino, dev, subj);
80370+ if (obj != NULL) {
80371+ read_unlock(&gr_inode_lock);
80372+ return (obj->mode & GR_FIND) ? 1 : 0;
80373+ }
80374+ } while ((subj = subj->parent_subject));
80375+ read_unlock(&gr_inode_lock);
80376+
80377+ /* this is purely an optimization since we're looking for an object
80378+ for the directory we're doing a readdir on
80379+ if it's possible for any globbed object to match the entry we're
80380+ filling into the directory, then the object we find here will be
80381+ an anchor point with attached globbed objects
80382+ */
80383+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
80384+ if (obj->globbed == NULL)
80385+ return (obj->mode & GR_FIND) ? 1 : 0;
80386+
80387+ is_not_root = ((obj->filename[0] == '/') &&
80388+ (obj->filename[1] == '\0')) ? 0 : 1;
80389+ bufsize = PAGE_SIZE - namelen - is_not_root;
80390+
80391+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
80392+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
80393+ return 1;
80394+
80395+ preempt_disable();
80396+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
80397+ bufsize);
80398+
80399+ bufsize = strlen(path);
80400+
80401+ /* if base is "/", don't append an additional slash */
80402+ if (is_not_root)
80403+ *(path + bufsize) = '/';
80404+ memcpy(path + bufsize + is_not_root, name, namelen);
80405+ *(path + bufsize + namelen + is_not_root) = '\0';
80406+
80407+ tmp = obj->globbed;
80408+ while (tmp) {
80409+ if (!glob_match(tmp->filename, path)) {
80410+ preempt_enable();
80411+ return (tmp->mode & GR_FIND) ? 1 : 0;
80412+ }
80413+ tmp = tmp->next;
80414+ }
80415+ preempt_enable();
80416+ return (obj->mode & GR_FIND) ? 1 : 0;
80417+}
80418+
80419+void gr_put_exec_file(struct task_struct *task)
80420+{
80421+ struct file *filp;
80422+
80423+ write_lock(&grsec_exec_file_lock);
80424+ filp = task->exec_file;
80425+ task->exec_file = NULL;
80426+ write_unlock(&grsec_exec_file_lock);
80427+
80428+ if (filp)
80429+ fput(filp);
80430+
80431+ return;
80432+}
80433+
80434+
80435+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
80436+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
80437+#endif
80438+#ifdef CONFIG_SECURITY
80439+EXPORT_SYMBOL_GPL(gr_check_user_change);
80440+EXPORT_SYMBOL_GPL(gr_check_group_change);
80441+#endif
80442+
80443diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
80444new file mode 100644
80445index 0000000..9adc75c
80446--- /dev/null
80447+++ b/grsecurity/gracl_alloc.c
80448@@ -0,0 +1,105 @@
80449+#include <linux/kernel.h>
80450+#include <linux/mm.h>
80451+#include <linux/slab.h>
80452+#include <linux/vmalloc.h>
80453+#include <linux/gracl.h>
80454+#include <linux/grsecurity.h>
80455+
80456+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
80457+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
80458+
80459+static int
80460+alloc_pop(void)
80461+{
80462+ if (current_alloc_state->alloc_stack_next == 1)
80463+ return 0;
80464+
80465+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
80466+
80467+ current_alloc_state->alloc_stack_next--;
80468+
80469+ return 1;
80470+}
80471+
80472+static int
80473+alloc_push(void *buf)
80474+{
80475+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
80476+ return 1;
80477+
80478+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
80479+
80480+ current_alloc_state->alloc_stack_next++;
80481+
80482+ return 0;
80483+}
80484+
80485+void *
80486+acl_alloc(unsigned long len)
80487+{
80488+ void *ret = NULL;
80489+
80490+ if (!len || len > PAGE_SIZE)
80491+ goto out;
80492+
80493+ ret = kmalloc(len, GFP_KERNEL);
80494+
80495+ if (ret) {
80496+ if (alloc_push(ret)) {
80497+ kfree(ret);
80498+ ret = NULL;
80499+ }
80500+ }
80501+
80502+out:
80503+ return ret;
80504+}
80505+
80506+void *
80507+acl_alloc_num(unsigned long num, unsigned long len)
80508+{
80509+ if (!len || (num > (PAGE_SIZE / len)))
80510+ return NULL;
80511+
80512+ return acl_alloc(num * len);
80513+}
80514+
80515+void
80516+acl_free_all(void)
80517+{
80518+ if (!current_alloc_state->alloc_stack)
80519+ return;
80520+
80521+ while (alloc_pop()) ;
80522+
80523+ if (current_alloc_state->alloc_stack) {
80524+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
80525+ kfree(current_alloc_state->alloc_stack);
80526+ else
80527+ vfree(current_alloc_state->alloc_stack);
80528+ }
80529+
80530+ current_alloc_state->alloc_stack = NULL;
80531+ current_alloc_state->alloc_stack_size = 1;
80532+ current_alloc_state->alloc_stack_next = 1;
80533+
80534+ return;
80535+}
80536+
80537+int
80538+acl_alloc_stack_init(unsigned long size)
80539+{
80540+ if ((size * sizeof (void *)) <= PAGE_SIZE)
80541+ current_alloc_state->alloc_stack =
80542+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
80543+ else
80544+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
80545+
80546+ current_alloc_state->alloc_stack_size = size;
80547+ current_alloc_state->alloc_stack_next = 1;
80548+
80549+ if (!current_alloc_state->alloc_stack)
80550+ return 0;
80551+ else
80552+ return 1;
80553+}
80554diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
80555new file mode 100644
80556index 0000000..1a94c11
80557--- /dev/null
80558+++ b/grsecurity/gracl_cap.c
80559@@ -0,0 +1,127 @@
80560+#include <linux/kernel.h>
80561+#include <linux/module.h>
80562+#include <linux/sched.h>
80563+#include <linux/gracl.h>
80564+#include <linux/grsecurity.h>
80565+#include <linux/grinternal.h>
80566+
80567+extern const char *captab_log[];
80568+extern int captab_log_entries;
80569+
80570+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
80571+{
80572+ struct acl_subject_label *curracl;
80573+
80574+ if (!gr_acl_is_enabled())
80575+ return 1;
80576+
80577+ curracl = task->acl;
80578+
80579+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
80580+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
80581+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
80582+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
80583+ gr_to_filename(task->exec_file->f_path.dentry,
80584+ task->exec_file->f_path.mnt) : curracl->filename,
80585+ curracl->filename, 0UL,
80586+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
80587+ return 1;
80588+ }
80589+
80590+ return 0;
80591+}
80592+
80593+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
80594+{
80595+ struct acl_subject_label *curracl;
80596+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
80597+ kernel_cap_t cap_audit = __cap_empty_set;
80598+
80599+ if (!gr_acl_is_enabled())
80600+ return 1;
80601+
80602+ curracl = task->acl;
80603+
80604+ cap_drop = curracl->cap_lower;
80605+ cap_mask = curracl->cap_mask;
80606+ cap_audit = curracl->cap_invert_audit;
80607+
80608+ while ((curracl = curracl->parent_subject)) {
80609+ /* if the cap isn't specified in the current computed mask but is specified in the
80610+ current level subject, and is lowered in the current level subject, then add
80611+ it to the set of dropped capabilities
80612+ otherwise, add the current level subject's mask to the current computed mask
80613+ */
80614+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
80615+ cap_raise(cap_mask, cap);
80616+ if (cap_raised(curracl->cap_lower, cap))
80617+ cap_raise(cap_drop, cap);
80618+ if (cap_raised(curracl->cap_invert_audit, cap))
80619+ cap_raise(cap_audit, cap);
80620+ }
80621+ }
80622+
80623+ if (!cap_raised(cap_drop, cap)) {
80624+ if (cap_raised(cap_audit, cap))
80625+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
80626+ return 1;
80627+ }
80628+
80629+ /* only learn the capability use if the process has the capability in the
80630+ general case, the two uses in sys.c of gr_learn_cap are an exception
80631+ to this rule to ensure any role transition involves what the full-learned
80632+ policy believes in a privileged process
80633+ */
80634+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
80635+ return 1;
80636+
80637+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
80638+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
80639+
80640+ return 0;
80641+}
80642+
80643+int
80644+gr_acl_is_capable(const int cap)
80645+{
80646+ return gr_task_acl_is_capable(current, current_cred(), cap);
80647+}
80648+
80649+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
80650+{
80651+ struct acl_subject_label *curracl;
80652+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
80653+
80654+ if (!gr_acl_is_enabled())
80655+ return 1;
80656+
80657+ curracl = task->acl;
80658+
80659+ cap_drop = curracl->cap_lower;
80660+ cap_mask = curracl->cap_mask;
80661+
80662+ while ((curracl = curracl->parent_subject)) {
80663+ /* if the cap isn't specified in the current computed mask but is specified in the
80664+ current level subject, and is lowered in the current level subject, then add
80665+ it to the set of dropped capabilities
80666+ otherwise, add the current level subject's mask to the current computed mask
80667+ */
80668+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
80669+ cap_raise(cap_mask, cap);
80670+ if (cap_raised(curracl->cap_lower, cap))
80671+ cap_raise(cap_drop, cap);
80672+ }
80673+ }
80674+
80675+ if (!cap_raised(cap_drop, cap))
80676+ return 1;
80677+
80678+ return 0;
80679+}
80680+
80681+int
80682+gr_acl_is_capable_nolog(const int cap)
80683+{
80684+ return gr_task_acl_is_capable_nolog(current, cap);
80685+}
80686+
80687diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
80688new file mode 100644
80689index 0000000..a43dd06
80690--- /dev/null
80691+++ b/grsecurity/gracl_compat.c
80692@@ -0,0 +1,269 @@
80693+#include <linux/kernel.h>
80694+#include <linux/gracl.h>
80695+#include <linux/compat.h>
80696+#include <linux/gracl_compat.h>
80697+
80698+#include <asm/uaccess.h>
80699+
80700+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
80701+{
80702+ struct gr_arg_wrapper_compat uwrapcompat;
80703+
80704+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
80705+ return -EFAULT;
80706+
80707+ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
80708+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
80709+ return -EINVAL;
80710+
80711+ uwrap->arg = compat_ptr(uwrapcompat.arg);
80712+ uwrap->version = uwrapcompat.version;
80713+ uwrap->size = sizeof(struct gr_arg);
80714+
80715+ return 0;
80716+}
80717+
80718+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
80719+{
80720+ struct gr_arg_compat argcompat;
80721+
80722+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
80723+ return -EFAULT;
80724+
80725+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
80726+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
80727+ arg->role_db.num_roles = argcompat.role_db.num_roles;
80728+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
80729+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
80730+ arg->role_db.num_objects = argcompat.role_db.num_objects;
80731+
80732+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
80733+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
80734+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
80735+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
80736+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
80737+ arg->segv_device = argcompat.segv_device;
80738+ arg->segv_inode = argcompat.segv_inode;
80739+ arg->segv_uid = argcompat.segv_uid;
80740+ arg->num_sprole_pws = argcompat.num_sprole_pws;
80741+ arg->mode = argcompat.mode;
80742+
80743+ return 0;
80744+}
80745+
80746+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
80747+{
80748+ struct acl_object_label_compat objcompat;
80749+
80750+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
80751+ return -EFAULT;
80752+
80753+ obj->filename = compat_ptr(objcompat.filename);
80754+ obj->inode = objcompat.inode;
80755+ obj->device = objcompat.device;
80756+ obj->mode = objcompat.mode;
80757+
80758+ obj->nested = compat_ptr(objcompat.nested);
80759+ obj->globbed = compat_ptr(objcompat.globbed);
80760+
80761+ obj->prev = compat_ptr(objcompat.prev);
80762+ obj->next = compat_ptr(objcompat.next);
80763+
80764+ return 0;
80765+}
80766+
80767+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
80768+{
80769+ unsigned int i;
80770+ struct acl_subject_label_compat subjcompat;
80771+
80772+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
80773+ return -EFAULT;
80774+
80775+ subj->filename = compat_ptr(subjcompat.filename);
80776+ subj->inode = subjcompat.inode;
80777+ subj->device = subjcompat.device;
80778+ subj->mode = subjcompat.mode;
80779+ subj->cap_mask = subjcompat.cap_mask;
80780+ subj->cap_lower = subjcompat.cap_lower;
80781+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
80782+
80783+ for (i = 0; i < GR_NLIMITS; i++) {
80784+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
80785+ subj->res[i].rlim_cur = RLIM_INFINITY;
80786+ else
80787+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
80788+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
80789+ subj->res[i].rlim_max = RLIM_INFINITY;
80790+ else
80791+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
80792+ }
80793+ subj->resmask = subjcompat.resmask;
80794+
80795+ subj->user_trans_type = subjcompat.user_trans_type;
80796+ subj->group_trans_type = subjcompat.group_trans_type;
80797+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
80798+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
80799+ subj->user_trans_num = subjcompat.user_trans_num;
80800+ subj->group_trans_num = subjcompat.group_trans_num;
80801+
80802+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
80803+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
80804+ subj->ip_type = subjcompat.ip_type;
80805+ subj->ips = compat_ptr(subjcompat.ips);
80806+ subj->ip_num = subjcompat.ip_num;
80807+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
80808+
80809+ subj->crashes = subjcompat.crashes;
80810+ subj->expires = subjcompat.expires;
80811+
80812+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
80813+ subj->hash = compat_ptr(subjcompat.hash);
80814+ subj->prev = compat_ptr(subjcompat.prev);
80815+ subj->next = compat_ptr(subjcompat.next);
80816+
80817+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
80818+ subj->obj_hash_size = subjcompat.obj_hash_size;
80819+ subj->pax_flags = subjcompat.pax_flags;
80820+
80821+ return 0;
80822+}
80823+
80824+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
80825+{
80826+ struct acl_role_label_compat rolecompat;
80827+
80828+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
80829+ return -EFAULT;
80830+
80831+ role->rolename = compat_ptr(rolecompat.rolename);
80832+ role->uidgid = rolecompat.uidgid;
80833+ role->roletype = rolecompat.roletype;
80834+
80835+ role->auth_attempts = rolecompat.auth_attempts;
80836+ role->expires = rolecompat.expires;
80837+
80838+ role->root_label = compat_ptr(rolecompat.root_label);
80839+ role->hash = compat_ptr(rolecompat.hash);
80840+
80841+ role->prev = compat_ptr(rolecompat.prev);
80842+ role->next = compat_ptr(rolecompat.next);
80843+
80844+ role->transitions = compat_ptr(rolecompat.transitions);
80845+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
80846+ role->domain_children = compat_ptr(rolecompat.domain_children);
80847+ role->domain_child_num = rolecompat.domain_child_num;
80848+
80849+ role->umask = rolecompat.umask;
80850+
80851+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
80852+ role->subj_hash_size = rolecompat.subj_hash_size;
80853+
80854+ return 0;
80855+}
80856+
80857+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
80858+{
80859+ struct role_allowed_ip_compat roleip_compat;
80860+
80861+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
80862+ return -EFAULT;
80863+
80864+ roleip->addr = roleip_compat.addr;
80865+ roleip->netmask = roleip_compat.netmask;
80866+
80867+ roleip->prev = compat_ptr(roleip_compat.prev);
80868+ roleip->next = compat_ptr(roleip_compat.next);
80869+
80870+ return 0;
80871+}
80872+
80873+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
80874+{
80875+ struct role_transition_compat trans_compat;
80876+
80877+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
80878+ return -EFAULT;
80879+
80880+ trans->rolename = compat_ptr(trans_compat.rolename);
80881+
80882+ trans->prev = compat_ptr(trans_compat.prev);
80883+ trans->next = compat_ptr(trans_compat.next);
80884+
80885+ return 0;
80886+
80887+}
80888+
80889+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
80890+{
80891+ struct gr_hash_struct_compat hash_compat;
80892+
80893+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
80894+ return -EFAULT;
80895+
80896+ hash->table = compat_ptr(hash_compat.table);
80897+ hash->nametable = compat_ptr(hash_compat.nametable);
80898+ hash->first = compat_ptr(hash_compat.first);
80899+
80900+ hash->table_size = hash_compat.table_size;
80901+ hash->used_size = hash_compat.used_size;
80902+
80903+ hash->type = hash_compat.type;
80904+
80905+ return 0;
80906+}
80907+
80908+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
80909+{
80910+ compat_uptr_t ptrcompat;
80911+
80912+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
80913+ return -EFAULT;
80914+
80915+ *(void **)ptr = compat_ptr(ptrcompat);
80916+
80917+ return 0;
80918+}
80919+
80920+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
80921+{
80922+ struct acl_ip_label_compat ip_compat;
80923+
80924+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
80925+ return -EFAULT;
80926+
80927+ ip->iface = compat_ptr(ip_compat.iface);
80928+ ip->addr = ip_compat.addr;
80929+ ip->netmask = ip_compat.netmask;
80930+ ip->low = ip_compat.low;
80931+ ip->high = ip_compat.high;
80932+ ip->mode = ip_compat.mode;
80933+ ip->type = ip_compat.type;
80934+
80935+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
80936+
80937+ ip->prev = compat_ptr(ip_compat.prev);
80938+ ip->next = compat_ptr(ip_compat.next);
80939+
80940+ return 0;
80941+}
80942+
80943+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
80944+{
80945+ struct sprole_pw_compat pw_compat;
80946+
80947+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
80948+ return -EFAULT;
80949+
80950+ pw->rolename = compat_ptr(pw_compat.rolename);
80951+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
80952+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
80953+
80954+ return 0;
80955+}
80956+
80957+size_t get_gr_arg_wrapper_size_compat(void)
80958+{
80959+ return sizeof(struct gr_arg_wrapper_compat);
80960+}
80961+
80962diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
80963new file mode 100644
80964index 0000000..8ee8e4f
80965--- /dev/null
80966+++ b/grsecurity/gracl_fs.c
80967@@ -0,0 +1,447 @@
80968+#include <linux/kernel.h>
80969+#include <linux/sched.h>
80970+#include <linux/types.h>
80971+#include <linux/fs.h>
80972+#include <linux/file.h>
80973+#include <linux/stat.h>
80974+#include <linux/grsecurity.h>
80975+#include <linux/grinternal.h>
80976+#include <linux/gracl.h>
80977+
80978+umode_t
80979+gr_acl_umask(void)
80980+{
80981+ if (unlikely(!gr_acl_is_enabled()))
80982+ return 0;
80983+
80984+ return current->role->umask;
80985+}
80986+
80987+__u32
80988+gr_acl_handle_hidden_file(const struct dentry * dentry,
80989+ const struct vfsmount * mnt)
80990+{
80991+ __u32 mode;
80992+
80993+ if (unlikely(d_is_negative(dentry)))
80994+ return GR_FIND;
80995+
80996+ mode =
80997+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
80998+
80999+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
81000+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
81001+ return mode;
81002+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
81003+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
81004+ return 0;
81005+ } else if (unlikely(!(mode & GR_FIND)))
81006+ return 0;
81007+
81008+ return GR_FIND;
81009+}
81010+
81011+__u32
81012+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
81013+ int acc_mode)
81014+{
81015+ __u32 reqmode = GR_FIND;
81016+ __u32 mode;
81017+
81018+ if (unlikely(d_is_negative(dentry)))
81019+ return reqmode;
81020+
81021+ if (acc_mode & MAY_APPEND)
81022+ reqmode |= GR_APPEND;
81023+ else if (acc_mode & MAY_WRITE)
81024+ reqmode |= GR_WRITE;
81025+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
81026+ reqmode |= GR_READ;
81027+
81028+ mode =
81029+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
81030+ mnt);
81031+
81032+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
81033+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
81034+ reqmode & GR_READ ? " reading" : "",
81035+ reqmode & GR_WRITE ? " writing" : reqmode &
81036+ GR_APPEND ? " appending" : "");
81037+ return reqmode;
81038+ } else
81039+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
81040+ {
81041+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
81042+ reqmode & GR_READ ? " reading" : "",
81043+ reqmode & GR_WRITE ? " writing" : reqmode &
81044+ GR_APPEND ? " appending" : "");
81045+ return 0;
81046+ } else if (unlikely((mode & reqmode) != reqmode))
81047+ return 0;
81048+
81049+ return reqmode;
81050+}
81051+
81052+__u32
81053+gr_acl_handle_creat(const struct dentry * dentry,
81054+ const struct dentry * p_dentry,
81055+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
81056+ const int imode)
81057+{
81058+ __u32 reqmode = GR_WRITE | GR_CREATE;
81059+ __u32 mode;
81060+
81061+ if (acc_mode & MAY_APPEND)
81062+ reqmode |= GR_APPEND;
81063+ // if a directory was required or the directory already exists, then
81064+ // don't count this open as a read
81065+ if ((acc_mode & MAY_READ) &&
81066+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
81067+ reqmode |= GR_READ;
81068+ if ((open_flags & O_CREAT) &&
81069+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
81070+ reqmode |= GR_SETID;
81071+
81072+ mode =
81073+ gr_check_create(dentry, p_dentry, p_mnt,
81074+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
81075+
81076+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
81077+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
81078+ reqmode & GR_READ ? " reading" : "",
81079+ reqmode & GR_WRITE ? " writing" : reqmode &
81080+ GR_APPEND ? " appending" : "");
81081+ return reqmode;
81082+ } else
81083+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
81084+ {
81085+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
81086+ reqmode & GR_READ ? " reading" : "",
81087+ reqmode & GR_WRITE ? " writing" : reqmode &
81088+ GR_APPEND ? " appending" : "");
81089+ return 0;
81090+ } else if (unlikely((mode & reqmode) != reqmode))
81091+ return 0;
81092+
81093+ return reqmode;
81094+}
81095+
81096+__u32
81097+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
81098+ const int fmode)
81099+{
81100+ __u32 mode, reqmode = GR_FIND;
81101+
81102+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
81103+ reqmode |= GR_EXEC;
81104+ if (fmode & S_IWOTH)
81105+ reqmode |= GR_WRITE;
81106+ if (fmode & S_IROTH)
81107+ reqmode |= GR_READ;
81108+
81109+ mode =
81110+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
81111+ mnt);
81112+
81113+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
81114+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
81115+ reqmode & GR_READ ? " reading" : "",
81116+ reqmode & GR_WRITE ? " writing" : "",
81117+ reqmode & GR_EXEC ? " executing" : "");
81118+ return reqmode;
81119+ } else
81120+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
81121+ {
81122+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
81123+ reqmode & GR_READ ? " reading" : "",
81124+ reqmode & GR_WRITE ? " writing" : "",
81125+ reqmode & GR_EXEC ? " executing" : "");
81126+ return 0;
81127+ } else if (unlikely((mode & reqmode) != reqmode))
81128+ return 0;
81129+
81130+ return reqmode;
81131+}
81132+
81133+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
81134+{
81135+ __u32 mode;
81136+
81137+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
81138+
81139+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
81140+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
81141+ return mode;
81142+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
81143+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
81144+ return 0;
81145+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
81146+ return 0;
81147+
81148+ return (reqmode);
81149+}
81150+
81151+__u32
81152+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
81153+{
81154+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
81155+}
81156+
81157+__u32
81158+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
81159+{
81160+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
81161+}
81162+
81163+__u32
81164+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
81165+{
81166+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
81167+}
81168+
81169+__u32
81170+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
81171+{
81172+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
81173+}
81174+
81175+__u32
81176+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
81177+ umode_t *modeptr)
81178+{
81179+ umode_t mode;
81180+
81181+ *modeptr &= ~gr_acl_umask();
81182+ mode = *modeptr;
81183+
81184+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
81185+ return 1;
81186+
81187+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
81188+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
81189+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
81190+ GR_CHMOD_ACL_MSG);
81191+ } else {
81192+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
81193+ }
81194+}
81195+
81196+__u32
81197+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
81198+{
81199+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
81200+}
81201+
81202+__u32
81203+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
81204+{
81205+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
81206+}
81207+
81208+__u32
81209+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
81210+{
81211+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
81212+}
81213+
81214+__u32
81215+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
81216+{
81217+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
81218+}
81219+
81220+__u32
81221+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
81222+{
81223+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
81224+ GR_UNIXCONNECT_ACL_MSG);
81225+}
81226+
81227+/* hardlinks require at minimum create and link permission,
81228+ any additional privilege required is based on the
81229+ privilege of the file being linked to
81230+*/
81231+__u32
81232+gr_acl_handle_link(const struct dentry * new_dentry,
81233+ const struct dentry * parent_dentry,
81234+ const struct vfsmount * parent_mnt,
81235+ const struct dentry * old_dentry,
81236+ const struct vfsmount * old_mnt, const struct filename *to)
81237+{
81238+ __u32 mode;
81239+ __u32 needmode = GR_CREATE | GR_LINK;
81240+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
81241+
81242+ mode =
81243+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
81244+ old_mnt);
81245+
81246+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
81247+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
81248+ return mode;
81249+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
81250+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
81251+ return 0;
81252+ } else if (unlikely((mode & needmode) != needmode))
81253+ return 0;
81254+
81255+ return 1;
81256+}
81257+
81258+__u32
81259+gr_acl_handle_symlink(const struct dentry * new_dentry,
81260+ const struct dentry * parent_dentry,
81261+ const struct vfsmount * parent_mnt, const struct filename *from)
81262+{
81263+ __u32 needmode = GR_WRITE | GR_CREATE;
81264+ __u32 mode;
81265+
81266+ mode =
81267+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
81268+ GR_CREATE | GR_AUDIT_CREATE |
81269+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
81270+
81271+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
81272+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
81273+ return mode;
81274+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
81275+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
81276+ return 0;
81277+ } else if (unlikely((mode & needmode) != needmode))
81278+ return 0;
81279+
81280+ return (GR_WRITE | GR_CREATE);
81281+}
81282+
81283+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
81284+{
81285+ __u32 mode;
81286+
81287+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
81288+
81289+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
81290+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
81291+ return mode;
81292+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
81293+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
81294+ return 0;
81295+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
81296+ return 0;
81297+
81298+ return (reqmode);
81299+}
81300+
81301+__u32
81302+gr_acl_handle_mknod(const struct dentry * new_dentry,
81303+ const struct dentry * parent_dentry,
81304+ const struct vfsmount * parent_mnt,
81305+ const int mode)
81306+{
81307+ __u32 reqmode = GR_WRITE | GR_CREATE;
81308+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
81309+ reqmode |= GR_SETID;
81310+
81311+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
81312+ reqmode, GR_MKNOD_ACL_MSG);
81313+}
81314+
81315+__u32
81316+gr_acl_handle_mkdir(const struct dentry *new_dentry,
81317+ const struct dentry *parent_dentry,
81318+ const struct vfsmount *parent_mnt)
81319+{
81320+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
81321+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
81322+}
81323+
81324+#define RENAME_CHECK_SUCCESS(old, new) \
81325+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
81326+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
81327+
81328+int
81329+gr_acl_handle_rename(struct dentry *new_dentry,
81330+ struct dentry *parent_dentry,
81331+ const struct vfsmount *parent_mnt,
81332+ struct dentry *old_dentry,
81333+ struct inode *old_parent_inode,
81334+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
81335+{
81336+ __u32 comp1, comp2;
81337+ int error = 0;
81338+
81339+ if (unlikely(!gr_acl_is_enabled()))
81340+ return 0;
81341+
81342+ if (flags & RENAME_EXCHANGE) {
81343+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
81344+ GR_AUDIT_READ | GR_AUDIT_WRITE |
81345+ GR_SUPPRESS, parent_mnt);
81346+ comp2 =
81347+ gr_search_file(old_dentry,
81348+ GR_READ | GR_WRITE | GR_AUDIT_READ |
81349+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
81350+ } else if (d_is_negative(new_dentry)) {
81351+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
81352+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
81353+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
81354+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
81355+ GR_DELETE | GR_AUDIT_DELETE |
81356+ GR_AUDIT_READ | GR_AUDIT_WRITE |
81357+ GR_SUPPRESS, old_mnt);
81358+ } else {
81359+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
81360+ GR_CREATE | GR_DELETE |
81361+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
81362+ GR_AUDIT_READ | GR_AUDIT_WRITE |
81363+ GR_SUPPRESS, parent_mnt);
81364+ comp2 =
81365+ gr_search_file(old_dentry,
81366+ GR_READ | GR_WRITE | GR_AUDIT_READ |
81367+ GR_DELETE | GR_AUDIT_DELETE |
81368+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
81369+ }
81370+
81371+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
81372+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
81373+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
81374+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
81375+ && !(comp2 & GR_SUPPRESS)) {
81376+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
81377+ error = -EACCES;
81378+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
81379+ error = -EACCES;
81380+
81381+ return error;
81382+}
81383+
81384+void
81385+gr_acl_handle_exit(void)
81386+{
81387+ u16 id;
81388+ char *rolename;
81389+
81390+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
81391+ !(current->role->roletype & GR_ROLE_PERSIST))) {
81392+ id = current->acl_role_id;
81393+ rolename = current->role->rolename;
81394+ gr_set_acls(1);
81395+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
81396+ }
81397+
81398+ gr_put_exec_file(current);
81399+ return;
81400+}
81401+
81402+int
81403+gr_acl_handle_procpidmem(const struct task_struct *task)
81404+{
81405+ if (unlikely(!gr_acl_is_enabled()))
81406+ return 0;
81407+
81408+ if (task != current && (task->acl->mode & GR_PROTPROCFD) &&
81409+ !(current->acl->mode & GR_POVERRIDE) &&
81410+ !(current->role->roletype & GR_ROLE_GOD))
81411+ return -EACCES;
81412+
81413+ return 0;
81414+}
81415diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
81416new file mode 100644
81417index 0000000..ed6ee43
81418--- /dev/null
81419+++ b/grsecurity/gracl_ip.c
81420@@ -0,0 +1,386 @@
81421+#include <linux/kernel.h>
81422+#include <asm/uaccess.h>
81423+#include <asm/errno.h>
81424+#include <net/sock.h>
81425+#include <linux/file.h>
81426+#include <linux/fs.h>
81427+#include <linux/net.h>
81428+#include <linux/in.h>
81429+#include <linux/skbuff.h>
81430+#include <linux/ip.h>
81431+#include <linux/udp.h>
81432+#include <linux/types.h>
81433+#include <linux/sched.h>
81434+#include <linux/netdevice.h>
81435+#include <linux/inetdevice.h>
81436+#include <linux/gracl.h>
81437+#include <linux/grsecurity.h>
81438+#include <linux/grinternal.h>
81439+
81440+#define GR_BIND 0x01
81441+#define GR_CONNECT 0x02
81442+#define GR_INVERT 0x04
81443+#define GR_BINDOVERRIDE 0x08
81444+#define GR_CONNECTOVERRIDE 0x10
81445+#define GR_SOCK_FAMILY 0x20
81446+
81447+static const char * gr_protocols[IPPROTO_MAX] = {
81448+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
81449+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
81450+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
81451+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
81452+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
81453+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
81454+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
81455+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
81456+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
81457+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
81458+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
81459+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
81460+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
81461+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
81462+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
81463+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
81464+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
81465+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
81466+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
81467+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
81468+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
81469+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
81470+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
81471+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
81472+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
81473+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
81474+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
81475+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
81476+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
81477+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
81478+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
81479+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
81480+ };
81481+
81482+static const char * gr_socktypes[SOCK_MAX] = {
81483+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
81484+ "unknown:7", "unknown:8", "unknown:9", "packet"
81485+ };
81486+
81487+static const char * gr_sockfamilies[AF_MAX+1] = {
81488+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
81489+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
81490+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
81491+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf", "alg", "nfc", "vsock"
81492+ };
81493+
81494+const char *
81495+gr_proto_to_name(unsigned char proto)
81496+{
81497+ return gr_protocols[proto];
81498+}
81499+
81500+const char *
81501+gr_socktype_to_name(unsigned char type)
81502+{
81503+ return gr_socktypes[type];
81504+}
81505+
81506+const char *
81507+gr_sockfamily_to_name(unsigned char family)
81508+{
81509+ return gr_sockfamilies[family];
81510+}
81511+
81512+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
81513+
81514+int
81515+gr_search_socket(const int domain, const int type, const int protocol)
81516+{
81517+ struct acl_subject_label *curr;
81518+ const struct cred *cred = current_cred();
81519+
81520+ if (unlikely(!gr_acl_is_enabled()))
81521+ goto exit;
81522+
81523+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
81524+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
81525+ goto exit; // let the kernel handle it
81526+
81527+ curr = current->acl;
81528+
81529+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
81530+ /* the family is allowed, if this is PF_INET allow it only if
81531+ the extra sock type/protocol checks pass */
81532+ if (domain == PF_INET)
81533+ goto inet_check;
81534+ goto exit;
81535+ } else {
81536+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
81537+ __u32 fakeip = 0;
81538+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
81539+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
81540+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
81541+ gr_to_filename(current->exec_file->f_path.dentry,
81542+ current->exec_file->f_path.mnt) :
81543+ curr->filename, curr->filename,
81544+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
81545+ &current->signal->saved_ip);
81546+ goto exit;
81547+ }
81548+ goto exit_fail;
81549+ }
81550+
81551+inet_check:
81552+ /* the rest of this checking is for IPv4 only */
81553+ if (!curr->ips)
81554+ goto exit;
81555+
81556+ if ((curr->ip_type & (1U << type)) &&
81557+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
81558+ goto exit;
81559+
81560+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
81561+ /* we don't place acls on raw sockets , and sometimes
81562+ dgram/ip sockets are opened for ioctl and not
81563+ bind/connect, so we'll fake a bind learn log */
81564+ if (type == SOCK_RAW || type == SOCK_PACKET) {
81565+ __u32 fakeip = 0;
81566+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
81567+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
81568+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
81569+ gr_to_filename(current->exec_file->f_path.dentry,
81570+ current->exec_file->f_path.mnt) :
81571+ curr->filename, curr->filename,
81572+ &fakeip, 0, type,
81573+ protocol, GR_CONNECT, &current->signal->saved_ip);
81574+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
81575+ __u32 fakeip = 0;
81576+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
81577+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
81578+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
81579+ gr_to_filename(current->exec_file->f_path.dentry,
81580+ current->exec_file->f_path.mnt) :
81581+ curr->filename, curr->filename,
81582+ &fakeip, 0, type,
81583+ protocol, GR_BIND, &current->signal->saved_ip);
81584+ }
81585+ /* we'll log when they use connect or bind */
81586+ goto exit;
81587+ }
81588+
81589+exit_fail:
81590+ if (domain == PF_INET)
81591+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
81592+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
81593+ else if (rcu_access_pointer(net_families[domain]) != NULL)
81594+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
81595+ gr_socktype_to_name(type), protocol);
81596+
81597+ return 0;
81598+exit:
81599+ return 1;
81600+}
81601+
81602+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
81603+{
81604+ if ((ip->mode & mode) &&
81605+ (ip_port >= ip->low) &&
81606+ (ip_port <= ip->high) &&
81607+ ((ntohl(ip_addr) & our_netmask) ==
81608+ (ntohl(our_addr) & our_netmask))
81609+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
81610+ && (ip->type & (1U << type))) {
81611+ if (ip->mode & GR_INVERT)
81612+ return 2; // specifically denied
81613+ else
81614+ return 1; // allowed
81615+ }
81616+
81617+ return 0; // not specifically allowed, may continue parsing
81618+}
81619+
81620+static int
81621+gr_search_connectbind(const int full_mode, struct sock *sk,
81622+ struct sockaddr_in *addr, const int type)
81623+{
81624+ char iface[IFNAMSIZ] = {0};
81625+ struct acl_subject_label *curr;
81626+ struct acl_ip_label *ip;
81627+ struct inet_sock *isk;
81628+ struct net_device *dev;
81629+ struct in_device *idev;
81630+ unsigned long i;
81631+ int ret;
81632+ int mode = full_mode & (GR_BIND | GR_CONNECT);
81633+ __u32 ip_addr = 0;
81634+ __u32 our_addr;
81635+ __u32 our_netmask;
81636+ char *p;
81637+ __u16 ip_port = 0;
81638+ const struct cred *cred = current_cred();
81639+
81640+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
81641+ return 0;
81642+
81643+ curr = current->acl;
81644+ isk = inet_sk(sk);
81645+
81646+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
81647+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
81648+ addr->sin_addr.s_addr = curr->inaddr_any_override;
81649+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
81650+ struct sockaddr_in saddr;
81651+ int err;
81652+
81653+ saddr.sin_family = AF_INET;
81654+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
81655+ saddr.sin_port = isk->inet_sport;
81656+
81657+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
81658+ if (err)
81659+ return err;
81660+
81661+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
81662+ if (err)
81663+ return err;
81664+ }
81665+
81666+ if (!curr->ips)
81667+ return 0;
81668+
81669+ ip_addr = addr->sin_addr.s_addr;
81670+ ip_port = ntohs(addr->sin_port);
81671+
81672+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
81673+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
81674+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
81675+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
81676+ gr_to_filename(current->exec_file->f_path.dentry,
81677+ current->exec_file->f_path.mnt) :
81678+ curr->filename, curr->filename,
81679+ &ip_addr, ip_port, type,
81680+ sk->sk_protocol, mode, &current->signal->saved_ip);
81681+ return 0;
81682+ }
81683+
81684+ for (i = 0; i < curr->ip_num; i++) {
81685+ ip = *(curr->ips + i);
81686+ if (ip->iface != NULL) {
81687+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
81688+ p = strchr(iface, ':');
81689+ if (p != NULL)
81690+ *p = '\0';
81691+ dev = dev_get_by_name(sock_net(sk), iface);
81692+ if (dev == NULL)
81693+ continue;
81694+ idev = in_dev_get(dev);
81695+ if (idev == NULL) {
81696+ dev_put(dev);
81697+ continue;
81698+ }
81699+ rcu_read_lock();
81700+ for_ifa(idev) {
81701+ if (!strcmp(ip->iface, ifa->ifa_label)) {
81702+ our_addr = ifa->ifa_address;
81703+ our_netmask = 0xffffffff;
81704+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
81705+ if (ret == 1) {
81706+ rcu_read_unlock();
81707+ in_dev_put(idev);
81708+ dev_put(dev);
81709+ return 0;
81710+ } else if (ret == 2) {
81711+ rcu_read_unlock();
81712+ in_dev_put(idev);
81713+ dev_put(dev);
81714+ goto denied;
81715+ }
81716+ }
81717+ } endfor_ifa(idev);
81718+ rcu_read_unlock();
81719+ in_dev_put(idev);
81720+ dev_put(dev);
81721+ } else {
81722+ our_addr = ip->addr;
81723+ our_netmask = ip->netmask;
81724+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
81725+ if (ret == 1)
81726+ return 0;
81727+ else if (ret == 2)
81728+ goto denied;
81729+ }
81730+ }
81731+
81732+denied:
81733+ if (mode == GR_BIND)
81734+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
81735+ else if (mode == GR_CONNECT)
81736+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
81737+
81738+ return -EACCES;
81739+}
81740+
81741+int
81742+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
81743+{
81744+ /* always allow disconnection of dgram sockets with connect */
81745+ if (addr->sin_family == AF_UNSPEC)
81746+ return 0;
81747+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
81748+}
81749+
81750+int
81751+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
81752+{
81753+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
81754+}
81755+
81756+int gr_search_listen(struct socket *sock)
81757+{
81758+ struct sock *sk = sock->sk;
81759+ struct sockaddr_in addr;
81760+
81761+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
81762+ addr.sin_port = inet_sk(sk)->inet_sport;
81763+
81764+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
81765+}
81766+
81767+int gr_search_accept(struct socket *sock)
81768+{
81769+ struct sock *sk = sock->sk;
81770+ struct sockaddr_in addr;
81771+
81772+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
81773+ addr.sin_port = inet_sk(sk)->inet_sport;
81774+
81775+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
81776+}
81777+
81778+int
81779+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
81780+{
81781+ if (addr)
81782+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
81783+ else {
81784+ struct sockaddr_in sin;
81785+ const struct inet_sock *inet = inet_sk(sk);
81786+
81787+ sin.sin_addr.s_addr = inet->inet_daddr;
81788+ sin.sin_port = inet->inet_dport;
81789+
81790+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
81791+ }
81792+}
81793+
81794+int
81795+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
81796+{
81797+ struct sockaddr_in sin;
81798+
81799+ if (unlikely(skb->len < sizeof (struct udphdr)))
81800+ return 0; // skip this packet
81801+
81802+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
81803+ sin.sin_port = udp_hdr(skb)->source;
81804+
81805+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
81806+}
81807diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
81808new file mode 100644
81809index 0000000..25f54ef
81810--- /dev/null
81811+++ b/grsecurity/gracl_learn.c
81812@@ -0,0 +1,207 @@
81813+#include <linux/kernel.h>
81814+#include <linux/mm.h>
81815+#include <linux/sched.h>
81816+#include <linux/poll.h>
81817+#include <linux/string.h>
81818+#include <linux/file.h>
81819+#include <linux/types.h>
81820+#include <linux/vmalloc.h>
81821+#include <linux/grinternal.h>
81822+
81823+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
81824+ size_t count, loff_t *ppos);
81825+extern int gr_acl_is_enabled(void);
81826+
81827+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
81828+static int gr_learn_attached;
81829+
81830+/* use a 512k buffer */
81831+#define LEARN_BUFFER_SIZE (512 * 1024)
81832+
81833+static DEFINE_SPINLOCK(gr_learn_lock);
81834+static DEFINE_MUTEX(gr_learn_user_mutex);
81835+
81836+/* we need to maintain two buffers, so that the kernel context of grlearn
81837+ uses a semaphore around the userspace copying, and the other kernel contexts
81838+ use a spinlock when copying into the buffer, since they cannot sleep
81839+*/
81840+static char *learn_buffer;
81841+static char *learn_buffer_user;
81842+static int learn_buffer_len;
81843+static int learn_buffer_user_len;
81844+
81845+static ssize_t
81846+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
81847+{
81848+ DECLARE_WAITQUEUE(wait, current);
81849+ ssize_t retval = 0;
81850+
81851+ add_wait_queue(&learn_wait, &wait);
81852+ set_current_state(TASK_INTERRUPTIBLE);
81853+ do {
81854+ mutex_lock(&gr_learn_user_mutex);
81855+ spin_lock(&gr_learn_lock);
81856+ if (learn_buffer_len)
81857+ break;
81858+ spin_unlock(&gr_learn_lock);
81859+ mutex_unlock(&gr_learn_user_mutex);
81860+ if (file->f_flags & O_NONBLOCK) {
81861+ retval = -EAGAIN;
81862+ goto out;
81863+ }
81864+ if (signal_pending(current)) {
81865+ retval = -ERESTARTSYS;
81866+ goto out;
81867+ }
81868+
81869+ schedule();
81870+ } while (1);
81871+
81872+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
81873+ learn_buffer_user_len = learn_buffer_len;
81874+ retval = learn_buffer_len;
81875+ learn_buffer_len = 0;
81876+
81877+ spin_unlock(&gr_learn_lock);
81878+
81879+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
81880+ retval = -EFAULT;
81881+
81882+ mutex_unlock(&gr_learn_user_mutex);
81883+out:
81884+ set_current_state(TASK_RUNNING);
81885+ remove_wait_queue(&learn_wait, &wait);
81886+ return retval;
81887+}
81888+
81889+static unsigned int
81890+poll_learn(struct file * file, poll_table * wait)
81891+{
81892+ poll_wait(file, &learn_wait, wait);
81893+
81894+ if (learn_buffer_len)
81895+ return (POLLIN | POLLRDNORM);
81896+
81897+ return 0;
81898+}
81899+
81900+void
81901+gr_clear_learn_entries(void)
81902+{
81903+ char *tmp;
81904+
81905+ mutex_lock(&gr_learn_user_mutex);
81906+ spin_lock(&gr_learn_lock);
81907+ tmp = learn_buffer;
81908+ learn_buffer = NULL;
81909+ spin_unlock(&gr_learn_lock);
81910+ if (tmp)
81911+ vfree(tmp);
81912+ if (learn_buffer_user != NULL) {
81913+ vfree(learn_buffer_user);
81914+ learn_buffer_user = NULL;
81915+ }
81916+ learn_buffer_len = 0;
81917+ mutex_unlock(&gr_learn_user_mutex);
81918+
81919+ return;
81920+}
81921+
81922+void
81923+gr_add_learn_entry(const char *fmt, ...)
81924+{
81925+ va_list args;
81926+ unsigned int len;
81927+
81928+ if (!gr_learn_attached)
81929+ return;
81930+
81931+ spin_lock(&gr_learn_lock);
81932+
81933+ /* leave a gap at the end so we know when it's "full" but don't have to
81934+ compute the exact length of the string we're trying to append
81935+ */
81936+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
81937+ spin_unlock(&gr_learn_lock);
81938+ wake_up_interruptible(&learn_wait);
81939+ return;
81940+ }
81941+ if (learn_buffer == NULL) {
81942+ spin_unlock(&gr_learn_lock);
81943+ return;
81944+ }
81945+
81946+ va_start(args, fmt);
81947+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
81948+ va_end(args);
81949+
81950+ learn_buffer_len += len + 1;
81951+
81952+ spin_unlock(&gr_learn_lock);
81953+ wake_up_interruptible(&learn_wait);
81954+
81955+ return;
81956+}
81957+
81958+static int
81959+open_learn(struct inode *inode, struct file *file)
81960+{
81961+ if (file->f_mode & FMODE_READ && gr_learn_attached)
81962+ return -EBUSY;
81963+ if (file->f_mode & FMODE_READ) {
81964+ int retval = 0;
81965+ mutex_lock(&gr_learn_user_mutex);
81966+ if (learn_buffer == NULL)
81967+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
81968+ if (learn_buffer_user == NULL)
81969+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
81970+ if (learn_buffer == NULL) {
81971+ retval = -ENOMEM;
81972+ goto out_error;
81973+ }
81974+ if (learn_buffer_user == NULL) {
81975+ retval = -ENOMEM;
81976+ goto out_error;
81977+ }
81978+ learn_buffer_len = 0;
81979+ learn_buffer_user_len = 0;
81980+ gr_learn_attached = 1;
81981+out_error:
81982+ mutex_unlock(&gr_learn_user_mutex);
81983+ return retval;
81984+ }
81985+ return 0;
81986+}
81987+
81988+static int
81989+close_learn(struct inode *inode, struct file *file)
81990+{
81991+ if (file->f_mode & FMODE_READ) {
81992+ char *tmp = NULL;
81993+ mutex_lock(&gr_learn_user_mutex);
81994+ spin_lock(&gr_learn_lock);
81995+ tmp = learn_buffer;
81996+ learn_buffer = NULL;
81997+ spin_unlock(&gr_learn_lock);
81998+ if (tmp)
81999+ vfree(tmp);
82000+ if (learn_buffer_user != NULL) {
82001+ vfree(learn_buffer_user);
82002+ learn_buffer_user = NULL;
82003+ }
82004+ learn_buffer_len = 0;
82005+ learn_buffer_user_len = 0;
82006+ gr_learn_attached = 0;
82007+ mutex_unlock(&gr_learn_user_mutex);
82008+ }
82009+
82010+ return 0;
82011+}
82012+
82013+const struct file_operations grsec_fops = {
82014+ .read = read_learn,
82015+ .write = write_grsec_handler,
82016+ .open = open_learn,
82017+ .release = close_learn,
82018+ .poll = poll_learn,
82019+};
82020diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
82021new file mode 100644
82022index 0000000..fd26052
82023--- /dev/null
82024+++ b/grsecurity/gracl_policy.c
82025@@ -0,0 +1,1781 @@
82026+#include <linux/kernel.h>
82027+#include <linux/module.h>
82028+#include <linux/sched.h>
82029+#include <linux/mm.h>
82030+#include <linux/file.h>
82031+#include <linux/fs.h>
82032+#include <linux/namei.h>
82033+#include <linux/mount.h>
82034+#include <linux/tty.h>
82035+#include <linux/proc_fs.h>
82036+#include <linux/lglock.h>
82037+#include <linux/slab.h>
82038+#include <linux/vmalloc.h>
82039+#include <linux/types.h>
82040+#include <linux/sysctl.h>
82041+#include <linux/netdevice.h>
82042+#include <linux/ptrace.h>
82043+#include <linux/gracl.h>
82044+#include <linux/gralloc.h>
82045+#include <linux/security.h>
82046+#include <linux/grinternal.h>
82047+#include <linux/pid_namespace.h>
82048+#include <linux/stop_machine.h>
82049+#include <linux/fdtable.h>
82050+#include <linux/percpu.h>
82051+#include <linux/lglock.h>
82052+#include <linux/hugetlb.h>
82053+#include <linux/posix-timers.h>
82054+#include "../fs/mount.h"
82055+
82056+#include <asm/uaccess.h>
82057+#include <asm/errno.h>
82058+#include <asm/mman.h>
82059+
82060+extern struct gr_policy_state *polstate;
82061+
82062+#define FOR_EACH_ROLE_START(role) \
82063+ role = polstate->role_list; \
82064+ while (role) {
82065+
82066+#define FOR_EACH_ROLE_END(role) \
82067+ role = role->prev; \
82068+ }
82069+
82070+struct path gr_real_root;
82071+
82072+extern struct gr_alloc_state *current_alloc_state;
82073+
82074+u16 acl_sp_role_value;
82075+
82076+static DEFINE_MUTEX(gr_dev_mutex);
82077+
82078+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
82079+extern void gr_clear_learn_entries(void);
82080+
82081+struct gr_arg *gr_usermode __read_only;
82082+unsigned char *gr_system_salt __read_only;
82083+unsigned char *gr_system_sum __read_only;
82084+
82085+static unsigned int gr_auth_attempts = 0;
82086+static unsigned long gr_auth_expires = 0UL;
82087+
82088+struct acl_object_label *fakefs_obj_rw;
82089+struct acl_object_label *fakefs_obj_rwx;
82090+
82091+extern int gr_init_uidset(void);
82092+extern void gr_free_uidset(void);
82093+extern void gr_remove_uid(uid_t uid);
82094+extern int gr_find_uid(uid_t uid);
82095+
82096+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback);
82097+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
82098+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
82099+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
82100+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
82101+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
82102+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
82103+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
82104+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
82105+extern struct acl_subject_label *lookup_acl_subj_label(const u64 ino, const dev_t dev, const struct acl_role_label *role);
82106+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev, const struct acl_role_label *role);
82107+extern void assign_special_role(const char *rolename);
82108+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
82109+extern int gr_rbac_disable(void *unused);
82110+extern void gr_enable_rbac_system(void);
82111+
82112+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
82113+{
82114+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
82115+ return -EFAULT;
82116+
82117+ return 0;
82118+}
82119+
82120+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
82121+{
82122+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
82123+ return -EFAULT;
82124+
82125+ return 0;
82126+}
82127+
82128+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
82129+{
82130+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
82131+ return -EFAULT;
82132+
82133+ return 0;
82134+}
82135+
82136+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
82137+{
82138+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
82139+ return -EFAULT;
82140+
82141+ return 0;
82142+}
82143+
82144+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
82145+{
82146+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
82147+ return -EFAULT;
82148+
82149+ return 0;
82150+}
82151+
82152+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
82153+{
82154+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
82155+ return -EFAULT;
82156+
82157+ return 0;
82158+}
82159+
82160+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
82161+{
82162+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
82163+ return -EFAULT;
82164+
82165+ return 0;
82166+}
82167+
82168+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
82169+{
82170+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
82171+ return -EFAULT;
82172+
82173+ return 0;
82174+}
82175+
82176+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
82177+{
82178+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
82179+ return -EFAULT;
82180+
82181+ return 0;
82182+}
82183+
82184+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
82185+{
82186+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
82187+ return -EFAULT;
82188+
82189+ if ((uwrap->version != GRSECURITY_VERSION) ||
82190+ (uwrap->size != sizeof(struct gr_arg)))
82191+ return -EINVAL;
82192+
82193+ return 0;
82194+}
82195+
82196+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
82197+{
82198+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
82199+ return -EFAULT;
82200+
82201+ return 0;
82202+}
82203+
82204+static size_t get_gr_arg_wrapper_size_normal(void)
82205+{
82206+ return sizeof(struct gr_arg_wrapper);
82207+}
82208+
82209+#ifdef CONFIG_COMPAT
82210+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
82211+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
82212+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
82213+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
82214+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
82215+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
82216+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
82217+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
82218+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
82219+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
82220+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
82221+extern size_t get_gr_arg_wrapper_size_compat(void);
82222+
82223+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
82224+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
82225+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
82226+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
82227+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
82228+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
82229+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
82230+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
82231+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
82232+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
82233+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
82234+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
82235+
82236+#else
82237+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
82238+#define copy_gr_arg copy_gr_arg_normal
82239+#define copy_gr_hash_struct copy_gr_hash_struct_normal
82240+#define copy_acl_object_label copy_acl_object_label_normal
82241+#define copy_acl_subject_label copy_acl_subject_label_normal
82242+#define copy_acl_role_label copy_acl_role_label_normal
82243+#define copy_acl_ip_label copy_acl_ip_label_normal
82244+#define copy_pointer_from_array copy_pointer_from_array_normal
82245+#define copy_sprole_pw copy_sprole_pw_normal
82246+#define copy_role_transition copy_role_transition_normal
82247+#define copy_role_allowed_ip copy_role_allowed_ip_normal
82248+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
82249+#endif
82250+
82251+static struct acl_subject_label *
82252+lookup_subject_map(const struct acl_subject_label *userp)
82253+{
82254+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
82255+ struct subject_map *match;
82256+
82257+ match = polstate->subj_map_set.s_hash[index];
82258+
82259+ while (match && match->user != userp)
82260+ match = match->next;
82261+
82262+ if (match != NULL)
82263+ return match->kernel;
82264+ else
82265+ return NULL;
82266+}
82267+
82268+static void
82269+insert_subj_map_entry(struct subject_map *subjmap)
82270+{
82271+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
82272+ struct subject_map **curr;
82273+
82274+ subjmap->prev = NULL;
82275+
82276+ curr = &polstate->subj_map_set.s_hash[index];
82277+ if (*curr != NULL)
82278+ (*curr)->prev = subjmap;
82279+
82280+ subjmap->next = *curr;
82281+ *curr = subjmap;
82282+
82283+ return;
82284+}
82285+
82286+static void
82287+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
82288+{
82289+ unsigned int index =
82290+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
82291+ struct acl_role_label **curr;
82292+ struct acl_role_label *tmp, *tmp2;
82293+
82294+ curr = &polstate->acl_role_set.r_hash[index];
82295+
82296+ /* simple case, slot is empty, just set it to our role */
82297+ if (*curr == NULL) {
82298+ *curr = role;
82299+ } else {
82300+ /* example:
82301+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
82302+ 2 -> 3
82303+ */
82304+ /* first check to see if we can already be reached via this slot */
82305+ tmp = *curr;
82306+ while (tmp && tmp != role)
82307+ tmp = tmp->next;
82308+ if (tmp == role) {
82309+ /* we don't need to add ourselves to this slot's chain */
82310+ return;
82311+ }
82312+ /* we need to add ourselves to this chain, two cases */
82313+ if (role->next == NULL) {
82314+ /* simple case, append the current chain to our role */
82315+ role->next = *curr;
82316+ *curr = role;
82317+ } else {
82318+ /* 1 -> 2 -> 3 -> 4
82319+ 2 -> 3 -> 4
82320+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
82321+ */
82322+ /* trickier case: walk our role's chain until we find
82323+ the role for the start of the current slot's chain */
82324+ tmp = role;
82325+ tmp2 = *curr;
82326+ while (tmp->next && tmp->next != tmp2)
82327+ tmp = tmp->next;
82328+ if (tmp->next == tmp2) {
82329+ /* from example above, we found 3, so just
82330+ replace this slot's chain with ours */
82331+ *curr = role;
82332+ } else {
82333+ /* we didn't find a subset of our role's chain
82334+ in the current slot's chain, so append their
82335+ chain to ours, and set us as the first role in
82336+ the slot's chain
82337+
82338+ we could fold this case with the case above,
82339+ but making it explicit for clarity
82340+ */
82341+ tmp->next = tmp2;
82342+ *curr = role;
82343+ }
82344+ }
82345+ }
82346+
82347+ return;
82348+}
82349+
82350+static void
82351+insert_acl_role_label(struct acl_role_label *role)
82352+{
82353+ int i;
82354+
82355+ if (polstate->role_list == NULL) {
82356+ polstate->role_list = role;
82357+ role->prev = NULL;
82358+ } else {
82359+ role->prev = polstate->role_list;
82360+ polstate->role_list = role;
82361+ }
82362+
82363+ /* used for hash chains */
82364+ role->next = NULL;
82365+
82366+ if (role->roletype & GR_ROLE_DOMAIN) {
82367+ for (i = 0; i < role->domain_child_num; i++)
82368+ __insert_acl_role_label(role, role->domain_children[i]);
82369+ } else
82370+ __insert_acl_role_label(role, role->uidgid);
82371+}
82372+
82373+static int
82374+insert_name_entry(char *name, const u64 inode, const dev_t device, __u8 deleted)
82375+{
82376+ struct name_entry **curr, *nentry;
82377+ struct inodev_entry *ientry;
82378+ unsigned int len = strlen(name);
82379+ unsigned int key = full_name_hash(name, len);
82380+ unsigned int index = key % polstate->name_set.n_size;
82381+
82382+ curr = &polstate->name_set.n_hash[index];
82383+
82384+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
82385+ curr = &((*curr)->next);
82386+
82387+ if (*curr != NULL)
82388+ return 1;
82389+
82390+ nentry = acl_alloc(sizeof (struct name_entry));
82391+ if (nentry == NULL)
82392+ return 0;
82393+ ientry = acl_alloc(sizeof (struct inodev_entry));
82394+ if (ientry == NULL)
82395+ return 0;
82396+ ientry->nentry = nentry;
82397+
82398+ nentry->key = key;
82399+ nentry->name = name;
82400+ nentry->inode = inode;
82401+ nentry->device = device;
82402+ nentry->len = len;
82403+ nentry->deleted = deleted;
82404+
82405+ nentry->prev = NULL;
82406+ curr = &polstate->name_set.n_hash[index];
82407+ if (*curr != NULL)
82408+ (*curr)->prev = nentry;
82409+ nentry->next = *curr;
82410+ *curr = nentry;
82411+
82412+ /* insert us into the table searchable by inode/dev */
82413+ __insert_inodev_entry(polstate, ientry);
82414+
82415+ return 1;
82416+}
82417+
82418+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
82419+
82420+static void *
82421+create_table(__u32 * len, int elementsize)
82422+{
82423+ unsigned int table_sizes[] = {
82424+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
82425+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
82426+ 4194301, 8388593, 16777213, 33554393, 67108859
82427+ };
82428+ void *newtable = NULL;
82429+ unsigned int pwr = 0;
82430+
82431+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
82432+ table_sizes[pwr] <= *len)
82433+ pwr++;
82434+
82435+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
82436+ return newtable;
82437+
82438+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
82439+ newtable =
82440+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
82441+ else
82442+ newtable = vmalloc(table_sizes[pwr] * elementsize);
82443+
82444+ *len = table_sizes[pwr];
82445+
82446+ return newtable;
82447+}
82448+
82449+static int
82450+init_variables(const struct gr_arg *arg, bool reload)
82451+{
82452+ struct task_struct *reaper = init_pid_ns.child_reaper;
82453+ unsigned int stacksize;
82454+
82455+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
82456+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
82457+ polstate->name_set.n_size = arg->role_db.num_objects;
82458+ polstate->inodev_set.i_size = arg->role_db.num_objects;
82459+
82460+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
82461+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
82462+ return 1;
82463+
82464+ if (!reload) {
82465+ if (!gr_init_uidset())
82466+ return 1;
82467+ }
82468+
82469+ /* set up the stack that holds allocation info */
82470+
82471+ stacksize = arg->role_db.num_pointers + 5;
82472+
82473+ if (!acl_alloc_stack_init(stacksize))
82474+ return 1;
82475+
82476+ if (!reload) {
82477+ /* grab reference for the real root dentry and vfsmount */
82478+ get_fs_root(reaper->fs, &gr_real_root);
82479+
82480+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
82481+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
82482+#endif
82483+
82484+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
82485+ if (fakefs_obj_rw == NULL)
82486+ return 1;
82487+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
82488+
82489+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
82490+ if (fakefs_obj_rwx == NULL)
82491+ return 1;
82492+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
82493+ }
82494+
82495+ polstate->subj_map_set.s_hash =
82496+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
82497+ polstate->acl_role_set.r_hash =
82498+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
82499+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
82500+ polstate->inodev_set.i_hash =
82501+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
82502+
82503+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
82504+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
82505+ return 1;
82506+
82507+ memset(polstate->subj_map_set.s_hash, 0,
82508+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
82509+ memset(polstate->acl_role_set.r_hash, 0,
82510+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
82511+ memset(polstate->name_set.n_hash, 0,
82512+ sizeof (struct name_entry *) * polstate->name_set.n_size);
82513+ memset(polstate->inodev_set.i_hash, 0,
82514+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
82515+
82516+ return 0;
82517+}
82518+
82519+/* free information not needed after startup
82520+ currently contains user->kernel pointer mappings for subjects
82521+*/
82522+
82523+static void
82524+free_init_variables(void)
82525+{
82526+ __u32 i;
82527+
82528+ if (polstate->subj_map_set.s_hash) {
82529+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
82530+ if (polstate->subj_map_set.s_hash[i]) {
82531+ kfree(polstate->subj_map_set.s_hash[i]);
82532+ polstate->subj_map_set.s_hash[i] = NULL;
82533+ }
82534+ }
82535+
82536+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
82537+ PAGE_SIZE)
82538+ kfree(polstate->subj_map_set.s_hash);
82539+ else
82540+ vfree(polstate->subj_map_set.s_hash);
82541+ }
82542+
82543+ return;
82544+}
82545+
82546+static void
82547+free_variables(bool reload)
82548+{
82549+ struct acl_subject_label *s;
82550+ struct acl_role_label *r;
82551+ struct task_struct *task, *task2;
82552+ unsigned int x;
82553+
82554+ if (!reload) {
82555+ gr_clear_learn_entries();
82556+
82557+ read_lock(&tasklist_lock);
82558+ do_each_thread(task2, task) {
82559+ task->acl_sp_role = 0;
82560+ task->acl_role_id = 0;
82561+ task->inherited = 0;
82562+ task->acl = NULL;
82563+ task->role = NULL;
82564+ } while_each_thread(task2, task);
82565+ read_unlock(&tasklist_lock);
82566+
82567+ kfree(fakefs_obj_rw);
82568+ fakefs_obj_rw = NULL;
82569+ kfree(fakefs_obj_rwx);
82570+ fakefs_obj_rwx = NULL;
82571+
82572+ /* release the reference to the real root dentry and vfsmount */
82573+ path_put(&gr_real_root);
82574+ memset(&gr_real_root, 0, sizeof(gr_real_root));
82575+ }
82576+
82577+ /* free all object hash tables */
82578+
82579+ FOR_EACH_ROLE_START(r)
82580+ if (r->subj_hash == NULL)
82581+ goto next_role;
82582+ FOR_EACH_SUBJECT_START(r, s, x)
82583+ if (s->obj_hash == NULL)
82584+ break;
82585+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
82586+ kfree(s->obj_hash);
82587+ else
82588+ vfree(s->obj_hash);
82589+ FOR_EACH_SUBJECT_END(s, x)
82590+ FOR_EACH_NESTED_SUBJECT_START(r, s)
82591+ if (s->obj_hash == NULL)
82592+ break;
82593+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
82594+ kfree(s->obj_hash);
82595+ else
82596+ vfree(s->obj_hash);
82597+ FOR_EACH_NESTED_SUBJECT_END(s)
82598+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
82599+ kfree(r->subj_hash);
82600+ else
82601+ vfree(r->subj_hash);
82602+ r->subj_hash = NULL;
82603+next_role:
82604+ FOR_EACH_ROLE_END(r)
82605+
82606+ acl_free_all();
82607+
82608+ if (polstate->acl_role_set.r_hash) {
82609+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
82610+ PAGE_SIZE)
82611+ kfree(polstate->acl_role_set.r_hash);
82612+ else
82613+ vfree(polstate->acl_role_set.r_hash);
82614+ }
82615+ if (polstate->name_set.n_hash) {
82616+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
82617+ PAGE_SIZE)
82618+ kfree(polstate->name_set.n_hash);
82619+ else
82620+ vfree(polstate->name_set.n_hash);
82621+ }
82622+
82623+ if (polstate->inodev_set.i_hash) {
82624+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
82625+ PAGE_SIZE)
82626+ kfree(polstate->inodev_set.i_hash);
82627+ else
82628+ vfree(polstate->inodev_set.i_hash);
82629+ }
82630+
82631+ if (!reload)
82632+ gr_free_uidset();
82633+
82634+ memset(&polstate->name_set, 0, sizeof (struct name_db));
82635+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
82636+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
82637+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
82638+
82639+ polstate->default_role = NULL;
82640+ polstate->kernel_role = NULL;
82641+ polstate->role_list = NULL;
82642+
82643+ return;
82644+}
82645+
82646+static struct acl_subject_label *
82647+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
82648+
82649+static int alloc_and_copy_string(char **name, unsigned int maxlen)
82650+{
82651+ unsigned int len = strnlen_user(*name, maxlen);
82652+ char *tmp;
82653+
82654+ if (!len || len >= maxlen)
82655+ return -EINVAL;
82656+
82657+ if ((tmp = (char *) acl_alloc(len)) == NULL)
82658+ return -ENOMEM;
82659+
82660+ if (copy_from_user(tmp, *name, len))
82661+ return -EFAULT;
82662+
82663+ tmp[len-1] = '\0';
82664+ *name = tmp;
82665+
82666+ return 0;
82667+}
82668+
82669+static int
82670+copy_user_glob(struct acl_object_label *obj)
82671+{
82672+ struct acl_object_label *g_tmp, **guser;
82673+ int error;
82674+
82675+ if (obj->globbed == NULL)
82676+ return 0;
82677+
82678+ guser = &obj->globbed;
82679+ while (*guser) {
82680+ g_tmp = (struct acl_object_label *)
82681+ acl_alloc(sizeof (struct acl_object_label));
82682+ if (g_tmp == NULL)
82683+ return -ENOMEM;
82684+
82685+ if (copy_acl_object_label(g_tmp, *guser))
82686+ return -EFAULT;
82687+
82688+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
82689+ if (error)
82690+ return error;
82691+
82692+ *guser = g_tmp;
82693+ guser = &(g_tmp->next);
82694+ }
82695+
82696+ return 0;
82697+}
82698+
82699+static int
82700+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
82701+ struct acl_role_label *role)
82702+{
82703+ struct acl_object_label *o_tmp;
82704+ int ret;
82705+
82706+ while (userp) {
82707+ if ((o_tmp = (struct acl_object_label *)
82708+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
82709+ return -ENOMEM;
82710+
82711+ if (copy_acl_object_label(o_tmp, userp))
82712+ return -EFAULT;
82713+
82714+ userp = o_tmp->prev;
82715+
82716+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
82717+ if (ret)
82718+ return ret;
82719+
82720+ insert_acl_obj_label(o_tmp, subj);
82721+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
82722+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
82723+ return -ENOMEM;
82724+
82725+ ret = copy_user_glob(o_tmp);
82726+ if (ret)
82727+ return ret;
82728+
82729+ if (o_tmp->nested) {
82730+ int already_copied;
82731+
82732+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
82733+ if (IS_ERR(o_tmp->nested))
82734+ return PTR_ERR(o_tmp->nested);
82735+
82736+ /* insert into nested subject list if we haven't copied this one yet
82737+ to prevent duplicate entries */
82738+ if (!already_copied) {
82739+ o_tmp->nested->next = role->hash->first;
82740+ role->hash->first = o_tmp->nested;
82741+ }
82742+ }
82743+ }
82744+
82745+ return 0;
82746+}
82747+
82748+static __u32
82749+count_user_subjs(struct acl_subject_label *userp)
82750+{
82751+ struct acl_subject_label s_tmp;
82752+ __u32 num = 0;
82753+
82754+ while (userp) {
82755+ if (copy_acl_subject_label(&s_tmp, userp))
82756+ break;
82757+
82758+ userp = s_tmp.prev;
82759+ }
82760+
82761+ return num;
82762+}
82763+
82764+static int
82765+copy_user_allowedips(struct acl_role_label *rolep)
82766+{
82767+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
82768+
82769+ ruserip = rolep->allowed_ips;
82770+
82771+ while (ruserip) {
82772+ rlast = rtmp;
82773+
82774+ if ((rtmp = (struct role_allowed_ip *)
82775+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
82776+ return -ENOMEM;
82777+
82778+ if (copy_role_allowed_ip(rtmp, ruserip))
82779+ return -EFAULT;
82780+
82781+ ruserip = rtmp->prev;
82782+
82783+ if (!rlast) {
82784+ rtmp->prev = NULL;
82785+ rolep->allowed_ips = rtmp;
82786+ } else {
82787+ rlast->next = rtmp;
82788+ rtmp->prev = rlast;
82789+ }
82790+
82791+ if (!ruserip)
82792+ rtmp->next = NULL;
82793+ }
82794+
82795+ return 0;
82796+}
82797+
82798+static int
82799+copy_user_transitions(struct acl_role_label *rolep)
82800+{
82801+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
82802+ int error;
82803+
82804+ rusertp = rolep->transitions;
82805+
82806+ while (rusertp) {
82807+ rlast = rtmp;
82808+
82809+ if ((rtmp = (struct role_transition *)
82810+ acl_alloc(sizeof (struct role_transition))) == NULL)
82811+ return -ENOMEM;
82812+
82813+ if (copy_role_transition(rtmp, rusertp))
82814+ return -EFAULT;
82815+
82816+ rusertp = rtmp->prev;
82817+
82818+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
82819+ if (error)
82820+ return error;
82821+
82822+ if (!rlast) {
82823+ rtmp->prev = NULL;
82824+ rolep->transitions = rtmp;
82825+ } else {
82826+ rlast->next = rtmp;
82827+ rtmp->prev = rlast;
82828+ }
82829+
82830+ if (!rusertp)
82831+ rtmp->next = NULL;
82832+ }
82833+
82834+ return 0;
82835+}
82836+
82837+static __u32 count_user_objs(const struct acl_object_label __user *userp)
82838+{
82839+ struct acl_object_label o_tmp;
82840+ __u32 num = 0;
82841+
82842+ while (userp) {
82843+ if (copy_acl_object_label(&o_tmp, userp))
82844+ break;
82845+
82846+ userp = o_tmp.prev;
82847+ num++;
82848+ }
82849+
82850+ return num;
82851+}
82852+
82853+static struct acl_subject_label *
82854+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
82855+{
82856+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
82857+ __u32 num_objs;
82858+ struct acl_ip_label **i_tmp, *i_utmp2;
82859+ struct gr_hash_struct ghash;
82860+ struct subject_map *subjmap;
82861+ unsigned int i_num;
82862+ int err;
82863+
82864+ if (already_copied != NULL)
82865+ *already_copied = 0;
82866+
82867+ s_tmp = lookup_subject_map(userp);
82868+
82869+ /* we've already copied this subject into the kernel, just return
82870+ the reference to it, and don't copy it over again
82871+ */
82872+ if (s_tmp) {
82873+ if (already_copied != NULL)
82874+ *already_copied = 1;
82875+ return(s_tmp);
82876+ }
82877+
82878+ if ((s_tmp = (struct acl_subject_label *)
82879+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
82880+ return ERR_PTR(-ENOMEM);
82881+
82882+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
82883+ if (subjmap == NULL)
82884+ return ERR_PTR(-ENOMEM);
82885+
82886+ subjmap->user = userp;
82887+ subjmap->kernel = s_tmp;
82888+ insert_subj_map_entry(subjmap);
82889+
82890+ if (copy_acl_subject_label(s_tmp, userp))
82891+ return ERR_PTR(-EFAULT);
82892+
82893+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
82894+ if (err)
82895+ return ERR_PTR(err);
82896+
82897+ if (!strcmp(s_tmp->filename, "/"))
82898+ role->root_label = s_tmp;
82899+
82900+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
82901+ return ERR_PTR(-EFAULT);
82902+
82903+ /* copy user and group transition tables */
82904+
82905+ if (s_tmp->user_trans_num) {
82906+ uid_t *uidlist;
82907+
82908+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
82909+ if (uidlist == NULL)
82910+ return ERR_PTR(-ENOMEM);
82911+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
82912+ return ERR_PTR(-EFAULT);
82913+
82914+ s_tmp->user_transitions = uidlist;
82915+ }
82916+
82917+ if (s_tmp->group_trans_num) {
82918+ gid_t *gidlist;
82919+
82920+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
82921+ if (gidlist == NULL)
82922+ return ERR_PTR(-ENOMEM);
82923+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
82924+ return ERR_PTR(-EFAULT);
82925+
82926+ s_tmp->group_transitions = gidlist;
82927+ }
82928+
82929+ /* set up object hash table */
82930+ num_objs = count_user_objs(ghash.first);
82931+
82932+ s_tmp->obj_hash_size = num_objs;
82933+ s_tmp->obj_hash =
82934+ (struct acl_object_label **)
82935+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
82936+
82937+ if (!s_tmp->obj_hash)
82938+ return ERR_PTR(-ENOMEM);
82939+
82940+ memset(s_tmp->obj_hash, 0,
82941+ s_tmp->obj_hash_size *
82942+ sizeof (struct acl_object_label *));
82943+
82944+ /* add in objects */
82945+ err = copy_user_objs(ghash.first, s_tmp, role);
82946+
82947+ if (err)
82948+ return ERR_PTR(err);
82949+
82950+ /* set pointer for parent subject */
82951+ if (s_tmp->parent_subject) {
82952+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
82953+
82954+ if (IS_ERR(s_tmp2))
82955+ return s_tmp2;
82956+
82957+ s_tmp->parent_subject = s_tmp2;
82958+ }
82959+
82960+ /* add in ip acls */
82961+
82962+ if (!s_tmp->ip_num) {
82963+ s_tmp->ips = NULL;
82964+ goto insert;
82965+ }
82966+
82967+ i_tmp =
82968+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
82969+ sizeof (struct acl_ip_label *));
82970+
82971+ if (!i_tmp)
82972+ return ERR_PTR(-ENOMEM);
82973+
82974+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
82975+ *(i_tmp + i_num) =
82976+ (struct acl_ip_label *)
82977+ acl_alloc(sizeof (struct acl_ip_label));
82978+ if (!*(i_tmp + i_num))
82979+ return ERR_PTR(-ENOMEM);
82980+
82981+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
82982+ return ERR_PTR(-EFAULT);
82983+
82984+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
82985+ return ERR_PTR(-EFAULT);
82986+
82987+ if ((*(i_tmp + i_num))->iface == NULL)
82988+ continue;
82989+
82990+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
82991+ if (err)
82992+ return ERR_PTR(err);
82993+ }
82994+
82995+ s_tmp->ips = i_tmp;
82996+
82997+insert:
82998+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
82999+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
83000+ return ERR_PTR(-ENOMEM);
83001+
83002+ return s_tmp;
83003+}
83004+
83005+static int
83006+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
83007+{
83008+ struct acl_subject_label s_pre;
83009+ struct acl_subject_label * ret;
83010+ int err;
83011+
83012+ while (userp) {
83013+ if (copy_acl_subject_label(&s_pre, userp))
83014+ return -EFAULT;
83015+
83016+ ret = do_copy_user_subj(userp, role, NULL);
83017+
83018+ err = PTR_ERR(ret);
83019+ if (IS_ERR(ret))
83020+ return err;
83021+
83022+ insert_acl_subj_label(ret, role);
83023+
83024+ userp = s_pre.prev;
83025+ }
83026+
83027+ return 0;
83028+}
83029+
83030+static int
83031+copy_user_acl(struct gr_arg *arg)
83032+{
83033+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
83034+ struct acl_subject_label *subj_list;
83035+ struct sprole_pw *sptmp;
83036+ struct gr_hash_struct *ghash;
83037+ uid_t *domainlist;
83038+ unsigned int r_num;
83039+ int err = 0;
83040+ __u16 i;
83041+ __u32 num_subjs;
83042+
83043+ /* we need a default and kernel role */
83044+ if (arg->role_db.num_roles < 2)
83045+ return -EINVAL;
83046+
83047+ /* copy special role authentication info from userspace */
83048+
83049+ polstate->num_sprole_pws = arg->num_sprole_pws;
83050+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
83051+
83052+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
83053+ return -ENOMEM;
83054+
83055+ for (i = 0; i < polstate->num_sprole_pws; i++) {
83056+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
83057+ if (!sptmp)
83058+ return -ENOMEM;
83059+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
83060+ return -EFAULT;
83061+
83062+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
83063+ if (err)
83064+ return err;
83065+
83066+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
83067+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
83068+#endif
83069+
83070+ polstate->acl_special_roles[i] = sptmp;
83071+ }
83072+
83073+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
83074+
83075+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
83076+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
83077+
83078+ if (!r_tmp)
83079+ return -ENOMEM;
83080+
83081+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
83082+ return -EFAULT;
83083+
83084+ if (copy_acl_role_label(r_tmp, r_utmp2))
83085+ return -EFAULT;
83086+
83087+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
83088+ if (err)
83089+ return err;
83090+
83091+ if (!strcmp(r_tmp->rolename, "default")
83092+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
83093+ polstate->default_role = r_tmp;
83094+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
83095+ polstate->kernel_role = r_tmp;
83096+ }
83097+
83098+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
83099+ return -ENOMEM;
83100+
83101+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
83102+ return -EFAULT;
83103+
83104+ r_tmp->hash = ghash;
83105+
83106+ num_subjs = count_user_subjs(r_tmp->hash->first);
83107+
83108+ r_tmp->subj_hash_size = num_subjs;
83109+ r_tmp->subj_hash =
83110+ (struct acl_subject_label **)
83111+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
83112+
83113+ if (!r_tmp->subj_hash)
83114+ return -ENOMEM;
83115+
83116+ err = copy_user_allowedips(r_tmp);
83117+ if (err)
83118+ return err;
83119+
83120+ /* copy domain info */
83121+ if (r_tmp->domain_children != NULL) {
83122+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
83123+ if (domainlist == NULL)
83124+ return -ENOMEM;
83125+
83126+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
83127+ return -EFAULT;
83128+
83129+ r_tmp->domain_children = domainlist;
83130+ }
83131+
83132+ err = copy_user_transitions(r_tmp);
83133+ if (err)
83134+ return err;
83135+
83136+ memset(r_tmp->subj_hash, 0,
83137+ r_tmp->subj_hash_size *
83138+ sizeof (struct acl_subject_label *));
83139+
83140+ /* acquire the list of subjects, then NULL out
83141+ the list prior to parsing the subjects for this role,
83142+ as during this parsing the list is replaced with a list
83143+ of *nested* subjects for the role
83144+ */
83145+ subj_list = r_tmp->hash->first;
83146+
83147+ /* set nested subject list to null */
83148+ r_tmp->hash->first = NULL;
83149+
83150+ err = copy_user_subjs(subj_list, r_tmp);
83151+
83152+ if (err)
83153+ return err;
83154+
83155+ insert_acl_role_label(r_tmp);
83156+ }
83157+
83158+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
83159+ return -EINVAL;
83160+
83161+ return err;
83162+}
83163+
83164+static int gracl_reload_apply_policies(void *reload)
83165+{
83166+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
83167+ struct task_struct *task, *task2;
83168+ struct acl_role_label *role, *rtmp;
83169+ struct acl_subject_label *subj;
83170+ const struct cred *cred;
83171+ int role_applied;
83172+ int ret = 0;
83173+
83174+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
83175+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
83176+
83177+ /* first make sure we'll be able to apply the new policy cleanly */
83178+ do_each_thread(task2, task) {
83179+ if (task->exec_file == NULL)
83180+ continue;
83181+ role_applied = 0;
83182+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
83183+ /* preserve special roles */
83184+ FOR_EACH_ROLE_START(role)
83185+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
83186+ rtmp = task->role;
83187+ task->role = role;
83188+ role_applied = 1;
83189+ break;
83190+ }
83191+ FOR_EACH_ROLE_END(role)
83192+ }
83193+ if (!role_applied) {
83194+ cred = __task_cred(task);
83195+ rtmp = task->role;
83196+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
83197+ }
83198+ /* this handles non-nested inherited subjects, nested subjects will still
83199+ be dropped currently */
83200+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
83201+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL, 1);
83202+ /* change the role back so that we've made no modifications to the policy */
83203+ task->role = rtmp;
83204+
83205+ if (subj == NULL || task->tmpacl == NULL) {
83206+ ret = -EINVAL;
83207+ goto out;
83208+ }
83209+ } while_each_thread(task2, task);
83210+
83211+ /* now actually apply the policy */
83212+
83213+ do_each_thread(task2, task) {
83214+ if (task->exec_file) {
83215+ role_applied = 0;
83216+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
83217+ /* preserve special roles */
83218+ FOR_EACH_ROLE_START(role)
83219+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
83220+ task->role = role;
83221+ role_applied = 1;
83222+ break;
83223+ }
83224+ FOR_EACH_ROLE_END(role)
83225+ }
83226+ if (!role_applied) {
83227+ cred = __task_cred(task);
83228+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
83229+ }
83230+ /* this handles non-nested inherited subjects, nested subjects will still
83231+ be dropped currently */
83232+ if (!reload_state->oldmode && task->inherited)
83233+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
83234+ else {
83235+ /* looked up and tagged to the task previously */
83236+ subj = task->tmpacl;
83237+ }
83238+ /* subj will be non-null */
83239+ __gr_apply_subject_to_task(polstate, task, subj);
83240+ if (reload_state->oldmode) {
83241+ task->acl_role_id = 0;
83242+ task->acl_sp_role = 0;
83243+ task->inherited = 0;
83244+ }
83245+ } else {
83246+ // it's a kernel process
83247+ task->role = polstate->kernel_role;
83248+ task->acl = polstate->kernel_role->root_label;
83249+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
83250+ task->acl->mode &= ~GR_PROCFIND;
83251+#endif
83252+ }
83253+ } while_each_thread(task2, task);
83254+
83255+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
83256+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
83257+
83258+out:
83259+
83260+ return ret;
83261+}
83262+
83263+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
83264+{
83265+ struct gr_reload_state new_reload_state = { };
83266+ int err;
83267+
83268+ new_reload_state.oldpolicy_ptr = polstate;
83269+ new_reload_state.oldalloc_ptr = current_alloc_state;
83270+ new_reload_state.oldmode = oldmode;
83271+
83272+ current_alloc_state = &new_reload_state.newalloc;
83273+ polstate = &new_reload_state.newpolicy;
83274+
83275+ /* everything relevant is now saved off, copy in the new policy */
83276+ if (init_variables(args, true)) {
83277+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
83278+ err = -ENOMEM;
83279+ goto error;
83280+ }
83281+
83282+ err = copy_user_acl(args);
83283+ free_init_variables();
83284+ if (err)
83285+ goto error;
83286+ /* the new policy is copied in, with the old policy available via saved_state
83287+ first go through applying roles, making sure to preserve special roles
83288+ then apply new subjects, making sure to preserve inherited and nested subjects,
83289+ though currently only inherited subjects will be preserved
83290+ */
83291+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
83292+ if (err)
83293+ goto error;
83294+
83295+ /* we've now applied the new policy, so restore the old policy state to free it */
83296+ polstate = &new_reload_state.oldpolicy;
83297+ current_alloc_state = &new_reload_state.oldalloc;
83298+ free_variables(true);
83299+
83300+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
83301+ to running_polstate/current_alloc_state inside stop_machine
83302+ */
83303+ err = 0;
83304+ goto out;
83305+error:
83306+ /* on error of loading the new policy, we'll just keep the previous
83307+ policy set around
83308+ */
83309+ free_variables(true);
83310+
83311+ /* doesn't affect runtime, but maintains consistent state */
83312+out:
83313+ polstate = new_reload_state.oldpolicy_ptr;
83314+ current_alloc_state = new_reload_state.oldalloc_ptr;
83315+
83316+ return err;
83317+}
83318+
83319+static int
83320+gracl_init(struct gr_arg *args)
83321+{
83322+ int error = 0;
83323+
83324+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
83325+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
83326+
83327+ if (init_variables(args, false)) {
83328+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
83329+ error = -ENOMEM;
83330+ goto out;
83331+ }
83332+
83333+ error = copy_user_acl(args);
83334+ free_init_variables();
83335+ if (error)
83336+ goto out;
83337+
83338+ error = gr_set_acls(0);
83339+ if (error)
83340+ goto out;
83341+
83342+ gr_enable_rbac_system();
83343+
83344+ return 0;
83345+
83346+out:
83347+ free_variables(false);
83348+ return error;
83349+}
83350+
83351+static int
83352+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
83353+ unsigned char **sum)
83354+{
83355+ struct acl_role_label *r;
83356+ struct role_allowed_ip *ipp;
83357+ struct role_transition *trans;
83358+ unsigned int i;
83359+ int found = 0;
83360+ u32 curr_ip = current->signal->curr_ip;
83361+
83362+ current->signal->saved_ip = curr_ip;
83363+
83364+ /* check transition table */
83365+
83366+ for (trans = current->role->transitions; trans; trans = trans->next) {
83367+ if (!strcmp(rolename, trans->rolename)) {
83368+ found = 1;
83369+ break;
83370+ }
83371+ }
83372+
83373+ if (!found)
83374+ return 0;
83375+
83376+ /* handle special roles that do not require authentication
83377+ and check ip */
83378+
83379+ FOR_EACH_ROLE_START(r)
83380+ if (!strcmp(rolename, r->rolename) &&
83381+ (r->roletype & GR_ROLE_SPECIAL)) {
83382+ found = 0;
83383+ if (r->allowed_ips != NULL) {
83384+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
83385+ if ((ntohl(curr_ip) & ipp->netmask) ==
83386+ (ntohl(ipp->addr) & ipp->netmask))
83387+ found = 1;
83388+ }
83389+ } else
83390+ found = 2;
83391+ if (!found)
83392+ return 0;
83393+
83394+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
83395+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
83396+ *salt = NULL;
83397+ *sum = NULL;
83398+ return 1;
83399+ }
83400+ }
83401+ FOR_EACH_ROLE_END(r)
83402+
83403+ for (i = 0; i < polstate->num_sprole_pws; i++) {
83404+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
83405+ *salt = polstate->acl_special_roles[i]->salt;
83406+ *sum = polstate->acl_special_roles[i]->sum;
83407+ return 1;
83408+ }
83409+ }
83410+
83411+ return 0;
83412+}
83413+
83414+int gr_check_secure_terminal(struct task_struct *task)
83415+{
83416+ struct task_struct *p, *p2, *p3;
83417+ struct files_struct *files;
83418+ struct fdtable *fdt;
83419+ struct file *our_file = NULL, *file;
83420+ int i;
83421+
83422+ if (task->signal->tty == NULL)
83423+ return 1;
83424+
83425+ files = get_files_struct(task);
83426+ if (files != NULL) {
83427+ rcu_read_lock();
83428+ fdt = files_fdtable(files);
83429+ for (i=0; i < fdt->max_fds; i++) {
83430+ file = fcheck_files(files, i);
83431+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
83432+ get_file(file);
83433+ our_file = file;
83434+ }
83435+ }
83436+ rcu_read_unlock();
83437+ put_files_struct(files);
83438+ }
83439+
83440+ if (our_file == NULL)
83441+ return 1;
83442+
83443+ read_lock(&tasklist_lock);
83444+ do_each_thread(p2, p) {
83445+ files = get_files_struct(p);
83446+ if (files == NULL ||
83447+ (p->signal && p->signal->tty == task->signal->tty)) {
83448+ if (files != NULL)
83449+ put_files_struct(files);
83450+ continue;
83451+ }
83452+ rcu_read_lock();
83453+ fdt = files_fdtable(files);
83454+ for (i=0; i < fdt->max_fds; i++) {
83455+ file = fcheck_files(files, i);
83456+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
83457+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
83458+ p3 = task;
83459+ while (task_pid_nr(p3) > 0) {
83460+ if (p3 == p)
83461+ break;
83462+ p3 = p3->real_parent;
83463+ }
83464+ if (p3 == p)
83465+ break;
83466+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
83467+ gr_handle_alertkill(p);
83468+ rcu_read_unlock();
83469+ put_files_struct(files);
83470+ read_unlock(&tasklist_lock);
83471+ fput(our_file);
83472+ return 0;
83473+ }
83474+ }
83475+ rcu_read_unlock();
83476+ put_files_struct(files);
83477+ } while_each_thread(p2, p);
83478+ read_unlock(&tasklist_lock);
83479+
83480+ fput(our_file);
83481+ return 1;
83482+}
83483+
83484+ssize_t
83485+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
83486+{
83487+ struct gr_arg_wrapper uwrap;
83488+ unsigned char *sprole_salt = NULL;
83489+ unsigned char *sprole_sum = NULL;
83490+ int error = 0;
83491+ int error2 = 0;
83492+ size_t req_count = 0;
83493+ unsigned char oldmode = 0;
83494+
83495+ mutex_lock(&gr_dev_mutex);
83496+
83497+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
83498+ error = -EPERM;
83499+ goto out;
83500+ }
83501+
83502+#ifdef CONFIG_COMPAT
83503+ pax_open_kernel();
83504+ if (is_compat_task()) {
83505+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
83506+ copy_gr_arg = &copy_gr_arg_compat;
83507+ copy_acl_object_label = &copy_acl_object_label_compat;
83508+ copy_acl_subject_label = &copy_acl_subject_label_compat;
83509+ copy_acl_role_label = &copy_acl_role_label_compat;
83510+ copy_acl_ip_label = &copy_acl_ip_label_compat;
83511+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
83512+ copy_role_transition = &copy_role_transition_compat;
83513+ copy_sprole_pw = &copy_sprole_pw_compat;
83514+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
83515+ copy_pointer_from_array = &copy_pointer_from_array_compat;
83516+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
83517+ } else {
83518+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
83519+ copy_gr_arg = &copy_gr_arg_normal;
83520+ copy_acl_object_label = &copy_acl_object_label_normal;
83521+ copy_acl_subject_label = &copy_acl_subject_label_normal;
83522+ copy_acl_role_label = &copy_acl_role_label_normal;
83523+ copy_acl_ip_label = &copy_acl_ip_label_normal;
83524+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
83525+ copy_role_transition = &copy_role_transition_normal;
83526+ copy_sprole_pw = &copy_sprole_pw_normal;
83527+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
83528+ copy_pointer_from_array = &copy_pointer_from_array_normal;
83529+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
83530+ }
83531+ pax_close_kernel();
83532+#endif
83533+
83534+ req_count = get_gr_arg_wrapper_size();
83535+
83536+ if (count != req_count) {
83537+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
83538+ error = -EINVAL;
83539+ goto out;
83540+ }
83541+
83542+
83543+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
83544+ gr_auth_expires = 0;
83545+ gr_auth_attempts = 0;
83546+ }
83547+
83548+ error = copy_gr_arg_wrapper(buf, &uwrap);
83549+ if (error)
83550+ goto out;
83551+
83552+ error = copy_gr_arg(uwrap.arg, gr_usermode);
83553+ if (error)
83554+ goto out;
83555+
83556+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
83557+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
83558+ time_after(gr_auth_expires, get_seconds())) {
83559+ error = -EBUSY;
83560+ goto out;
83561+ }
83562+
83563+ /* if non-root trying to do anything other than use a special role,
83564+ do not attempt authentication, do not count towards authentication
83565+ locking
83566+ */
83567+
83568+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
83569+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
83570+ gr_is_global_nonroot(current_uid())) {
83571+ error = -EPERM;
83572+ goto out;
83573+ }
83574+
83575+ /* ensure pw and special role name are null terminated */
83576+
83577+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
83578+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
83579+
83580+ /* Okay.
83581+ * We have our enough of the argument structure..(we have yet
83582+ * to copy_from_user the tables themselves) . Copy the tables
83583+ * only if we need them, i.e. for loading operations. */
83584+
83585+ switch (gr_usermode->mode) {
83586+ case GR_STATUS:
83587+ if (gr_acl_is_enabled()) {
83588+ error = 1;
83589+ if (!gr_check_secure_terminal(current))
83590+ error = 3;
83591+ } else
83592+ error = 2;
83593+ goto out;
83594+ case GR_SHUTDOWN:
83595+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
83596+ stop_machine(gr_rbac_disable, NULL, NULL);
83597+ free_variables(false);
83598+ memset(gr_usermode, 0, sizeof(struct gr_arg));
83599+ memset(gr_system_salt, 0, GR_SALT_LEN);
83600+ memset(gr_system_sum, 0, GR_SHA_LEN);
83601+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
83602+ } else if (gr_acl_is_enabled()) {
83603+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
83604+ error = -EPERM;
83605+ } else {
83606+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
83607+ error = -EAGAIN;
83608+ }
83609+ break;
83610+ case GR_ENABLE:
83611+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
83612+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
83613+ else {
83614+ if (gr_acl_is_enabled())
83615+ error = -EAGAIN;
83616+ else
83617+ error = error2;
83618+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
83619+ }
83620+ break;
83621+ case GR_OLDRELOAD:
83622+ oldmode = 1;
83623+ case GR_RELOAD:
83624+ if (!gr_acl_is_enabled()) {
83625+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
83626+ error = -EAGAIN;
83627+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
83628+ error2 = gracl_reload(gr_usermode, oldmode);
83629+ if (!error2)
83630+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
83631+ else {
83632+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
83633+ error = error2;
83634+ }
83635+ } else {
83636+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
83637+ error = -EPERM;
83638+ }
83639+ break;
83640+ case GR_SEGVMOD:
83641+ if (unlikely(!gr_acl_is_enabled())) {
83642+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
83643+ error = -EAGAIN;
83644+ break;
83645+ }
83646+
83647+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
83648+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
83649+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
83650+ struct acl_subject_label *segvacl;
83651+ segvacl =
83652+ lookup_acl_subj_label(gr_usermode->segv_inode,
83653+ gr_usermode->segv_device,
83654+ current->role);
83655+ if (segvacl) {
83656+ segvacl->crashes = 0;
83657+ segvacl->expires = 0;
83658+ }
83659+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
83660+ gr_remove_uid(gr_usermode->segv_uid);
83661+ }
83662+ } else {
83663+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
83664+ error = -EPERM;
83665+ }
83666+ break;
83667+ case GR_SPROLE:
83668+ case GR_SPROLEPAM:
83669+ if (unlikely(!gr_acl_is_enabled())) {
83670+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
83671+ error = -EAGAIN;
83672+ break;
83673+ }
83674+
83675+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
83676+ current->role->expires = 0;
83677+ current->role->auth_attempts = 0;
83678+ }
83679+
83680+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
83681+ time_after(current->role->expires, get_seconds())) {
83682+ error = -EBUSY;
83683+ goto out;
83684+ }
83685+
83686+ if (lookup_special_role_auth
83687+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
83688+ && ((!sprole_salt && !sprole_sum)
83689+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
83690+ char *p = "";
83691+ assign_special_role(gr_usermode->sp_role);
83692+ read_lock(&tasklist_lock);
83693+ if (current->real_parent)
83694+ p = current->real_parent->role->rolename;
83695+ read_unlock(&tasklist_lock);
83696+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
83697+ p, acl_sp_role_value);
83698+ } else {
83699+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
83700+ error = -EPERM;
83701+ if(!(current->role->auth_attempts++))
83702+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
83703+
83704+ goto out;
83705+ }
83706+ break;
83707+ case GR_UNSPROLE:
83708+ if (unlikely(!gr_acl_is_enabled())) {
83709+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
83710+ error = -EAGAIN;
83711+ break;
83712+ }
83713+
83714+ if (current->role->roletype & GR_ROLE_SPECIAL) {
83715+ char *p = "";
83716+ int i = 0;
83717+
83718+ read_lock(&tasklist_lock);
83719+ if (current->real_parent) {
83720+ p = current->real_parent->role->rolename;
83721+ i = current->real_parent->acl_role_id;
83722+ }
83723+ read_unlock(&tasklist_lock);
83724+
83725+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
83726+ gr_set_acls(1);
83727+ } else {
83728+ error = -EPERM;
83729+ goto out;
83730+ }
83731+ break;
83732+ default:
83733+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
83734+ error = -EINVAL;
83735+ break;
83736+ }
83737+
83738+ if (error != -EPERM)
83739+ goto out;
83740+
83741+ if(!(gr_auth_attempts++))
83742+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
83743+
83744+ out:
83745+ mutex_unlock(&gr_dev_mutex);
83746+
83747+ if (!error)
83748+ error = req_count;
83749+
83750+ return error;
83751+}
83752+
83753+int
83754+gr_set_acls(const int type)
83755+{
83756+ struct task_struct *task, *task2;
83757+ struct acl_role_label *role = current->role;
83758+ struct acl_subject_label *subj;
83759+ __u16 acl_role_id = current->acl_role_id;
83760+ const struct cred *cred;
83761+ int ret;
83762+
83763+ rcu_read_lock();
83764+ read_lock(&tasklist_lock);
83765+ read_lock(&grsec_exec_file_lock);
83766+ do_each_thread(task2, task) {
83767+ /* check to see if we're called from the exit handler,
83768+ if so, only replace ACLs that have inherited the admin
83769+ ACL */
83770+
83771+ if (type && (task->role != role ||
83772+ task->acl_role_id != acl_role_id))
83773+ continue;
83774+
83775+ task->acl_role_id = 0;
83776+ task->acl_sp_role = 0;
83777+ task->inherited = 0;
83778+
83779+ if (task->exec_file) {
83780+ cred = __task_cred(task);
83781+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
83782+ subj = __gr_get_subject_for_task(polstate, task, NULL, 1);
83783+ if (subj == NULL) {
83784+ ret = -EINVAL;
83785+ read_unlock(&grsec_exec_file_lock);
83786+ read_unlock(&tasklist_lock);
83787+ rcu_read_unlock();
83788+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
83789+ return ret;
83790+ }
83791+ __gr_apply_subject_to_task(polstate, task, subj);
83792+ } else {
83793+ // it's a kernel process
83794+ task->role = polstate->kernel_role;
83795+ task->acl = polstate->kernel_role->root_label;
83796+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
83797+ task->acl->mode &= ~GR_PROCFIND;
83798+#endif
83799+ }
83800+ } while_each_thread(task2, task);
83801+ read_unlock(&grsec_exec_file_lock);
83802+ read_unlock(&tasklist_lock);
83803+ rcu_read_unlock();
83804+
83805+ return 0;
83806+}
83807diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
83808new file mode 100644
83809index 0000000..39645c9
83810--- /dev/null
83811+++ b/grsecurity/gracl_res.c
83812@@ -0,0 +1,68 @@
83813+#include <linux/kernel.h>
83814+#include <linux/sched.h>
83815+#include <linux/gracl.h>
83816+#include <linux/grinternal.h>
83817+
83818+static const char *restab_log[] = {
83819+ [RLIMIT_CPU] = "RLIMIT_CPU",
83820+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
83821+ [RLIMIT_DATA] = "RLIMIT_DATA",
83822+ [RLIMIT_STACK] = "RLIMIT_STACK",
83823+ [RLIMIT_CORE] = "RLIMIT_CORE",
83824+ [RLIMIT_RSS] = "RLIMIT_RSS",
83825+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
83826+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
83827+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
83828+ [RLIMIT_AS] = "RLIMIT_AS",
83829+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
83830+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
83831+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
83832+ [RLIMIT_NICE] = "RLIMIT_NICE",
83833+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
83834+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
83835+ [GR_CRASH_RES] = "RLIMIT_CRASH"
83836+};
83837+
83838+void
83839+gr_log_resource(const struct task_struct *task,
83840+ const int res, const unsigned long wanted, const int gt)
83841+{
83842+ const struct cred *cred;
83843+ unsigned long rlim;
83844+
83845+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
83846+ return;
83847+
83848+ // not yet supported resource
83849+ if (unlikely(!restab_log[res]))
83850+ return;
83851+
83852+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
83853+ rlim = task_rlimit_max(task, res);
83854+ else
83855+ rlim = task_rlimit(task, res);
83856+
83857+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
83858+ return;
83859+
83860+ rcu_read_lock();
83861+ cred = __task_cred(task);
83862+
83863+ if (res == RLIMIT_NPROC &&
83864+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
83865+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
83866+ goto out_rcu_unlock;
83867+ else if (res == RLIMIT_MEMLOCK &&
83868+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
83869+ goto out_rcu_unlock;
83870+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
83871+ goto out_rcu_unlock;
83872+ rcu_read_unlock();
83873+
83874+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
83875+
83876+ return;
83877+out_rcu_unlock:
83878+ rcu_read_unlock();
83879+ return;
83880+}
83881diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
83882new file mode 100644
83883index 0000000..35d9e65
83884--- /dev/null
83885+++ b/grsecurity/gracl_segv.c
83886@@ -0,0 +1,324 @@
83887+#include <linux/kernel.h>
83888+#include <linux/mm.h>
83889+#include <asm/uaccess.h>
83890+#include <asm/errno.h>
83891+#include <asm/mman.h>
83892+#include <net/sock.h>
83893+#include <linux/file.h>
83894+#include <linux/fs.h>
83895+#include <linux/net.h>
83896+#include <linux/in.h>
83897+#include <linux/slab.h>
83898+#include <linux/types.h>
83899+#include <linux/sched.h>
83900+#include <linux/timer.h>
83901+#include <linux/gracl.h>
83902+#include <linux/grsecurity.h>
83903+#include <linux/grinternal.h>
83904+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
83905+#include <linux/magic.h>
83906+#include <linux/pagemap.h>
83907+#include "../fs/btrfs/async-thread.h"
83908+#include "../fs/btrfs/ctree.h"
83909+#include "../fs/btrfs/btrfs_inode.h"
83910+#endif
83911+
83912+static struct crash_uid *uid_set;
83913+static unsigned short uid_used;
83914+static DEFINE_SPINLOCK(gr_uid_lock);
83915+extern rwlock_t gr_inode_lock;
83916+extern struct acl_subject_label *
83917+ lookup_acl_subj_label(const u64 inode, const dev_t dev,
83918+ struct acl_role_label *role);
83919+
83920+static inline dev_t __get_dev(const struct dentry *dentry)
83921+{
83922+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
83923+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
83924+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
83925+ else
83926+#endif
83927+ return dentry->d_sb->s_dev;
83928+}
83929+
83930+static inline u64 __get_ino(const struct dentry *dentry)
83931+{
83932+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
83933+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
83934+ return btrfs_ino(dentry->d_inode);
83935+ else
83936+#endif
83937+ return dentry->d_inode->i_ino;
83938+}
83939+
83940+int
83941+gr_init_uidset(void)
83942+{
83943+ uid_set =
83944+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
83945+ uid_used = 0;
83946+
83947+ return uid_set ? 1 : 0;
83948+}
83949+
83950+void
83951+gr_free_uidset(void)
83952+{
83953+ if (uid_set) {
83954+ struct crash_uid *tmpset;
83955+ spin_lock(&gr_uid_lock);
83956+ tmpset = uid_set;
83957+ uid_set = NULL;
83958+ uid_used = 0;
83959+ spin_unlock(&gr_uid_lock);
83960+ if (tmpset)
83961+ kfree(tmpset);
83962+ }
83963+
83964+ return;
83965+}
83966+
83967+int
83968+gr_find_uid(const uid_t uid)
83969+{
83970+ struct crash_uid *tmp = uid_set;
83971+ uid_t buid;
83972+ int low = 0, high = uid_used - 1, mid;
83973+
83974+ while (high >= low) {
83975+ mid = (low + high) >> 1;
83976+ buid = tmp[mid].uid;
83977+ if (buid == uid)
83978+ return mid;
83979+ if (buid > uid)
83980+ high = mid - 1;
83981+ if (buid < uid)
83982+ low = mid + 1;
83983+ }
83984+
83985+ return -1;
83986+}
83987+
83988+static void
83989+gr_insertsort(void)
83990+{
83991+ unsigned short i, j;
83992+ struct crash_uid index;
83993+
83994+ for (i = 1; i < uid_used; i++) {
83995+ index = uid_set[i];
83996+ j = i;
83997+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
83998+ uid_set[j] = uid_set[j - 1];
83999+ j--;
84000+ }
84001+ uid_set[j] = index;
84002+ }
84003+
84004+ return;
84005+}
84006+
84007+static void
84008+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
84009+{
84010+ int loc;
84011+ uid_t uid = GR_GLOBAL_UID(kuid);
84012+
84013+ if (uid_used == GR_UIDTABLE_MAX)
84014+ return;
84015+
84016+ loc = gr_find_uid(uid);
84017+
84018+ if (loc >= 0) {
84019+ uid_set[loc].expires = expires;
84020+ return;
84021+ }
84022+
84023+ uid_set[uid_used].uid = uid;
84024+ uid_set[uid_used].expires = expires;
84025+ uid_used++;
84026+
84027+ gr_insertsort();
84028+
84029+ return;
84030+}
84031+
84032+void
84033+gr_remove_uid(const unsigned short loc)
84034+{
84035+ unsigned short i;
84036+
84037+ for (i = loc + 1; i < uid_used; i++)
84038+ uid_set[i - 1] = uid_set[i];
84039+
84040+ uid_used--;
84041+
84042+ return;
84043+}
84044+
84045+int
84046+gr_check_crash_uid(const kuid_t kuid)
84047+{
84048+ int loc;
84049+ int ret = 0;
84050+ uid_t uid;
84051+
84052+ if (unlikely(!gr_acl_is_enabled()))
84053+ return 0;
84054+
84055+ uid = GR_GLOBAL_UID(kuid);
84056+
84057+ spin_lock(&gr_uid_lock);
84058+ loc = gr_find_uid(uid);
84059+
84060+ if (loc < 0)
84061+ goto out_unlock;
84062+
84063+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
84064+ gr_remove_uid(loc);
84065+ else
84066+ ret = 1;
84067+
84068+out_unlock:
84069+ spin_unlock(&gr_uid_lock);
84070+ return ret;
84071+}
84072+
84073+static int
84074+proc_is_setxid(const struct cred *cred)
84075+{
84076+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
84077+ !uid_eq(cred->uid, cred->fsuid))
84078+ return 1;
84079+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
84080+ !gid_eq(cred->gid, cred->fsgid))
84081+ return 1;
84082+
84083+ return 0;
84084+}
84085+
84086+extern int gr_fake_force_sig(int sig, struct task_struct *t);
84087+
84088+void
84089+gr_handle_crash(struct task_struct *task, const int sig)
84090+{
84091+ struct acl_subject_label *curr;
84092+ struct task_struct *tsk, *tsk2;
84093+ const struct cred *cred;
84094+ const struct cred *cred2;
84095+
84096+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
84097+ return;
84098+
84099+ if (unlikely(!gr_acl_is_enabled()))
84100+ return;
84101+
84102+ curr = task->acl;
84103+
84104+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
84105+ return;
84106+
84107+ if (time_before_eq(curr->expires, get_seconds())) {
84108+ curr->expires = 0;
84109+ curr->crashes = 0;
84110+ }
84111+
84112+ curr->crashes++;
84113+
84114+ if (!curr->expires)
84115+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
84116+
84117+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
84118+ time_after(curr->expires, get_seconds())) {
84119+ rcu_read_lock();
84120+ cred = __task_cred(task);
84121+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
84122+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
84123+ spin_lock(&gr_uid_lock);
84124+ gr_insert_uid(cred->uid, curr->expires);
84125+ spin_unlock(&gr_uid_lock);
84126+ curr->expires = 0;
84127+ curr->crashes = 0;
84128+ read_lock(&tasklist_lock);
84129+ do_each_thread(tsk2, tsk) {
84130+ cred2 = __task_cred(tsk);
84131+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
84132+ gr_fake_force_sig(SIGKILL, tsk);
84133+ } while_each_thread(tsk2, tsk);
84134+ read_unlock(&tasklist_lock);
84135+ } else {
84136+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
84137+ read_lock(&tasklist_lock);
84138+ read_lock(&grsec_exec_file_lock);
84139+ do_each_thread(tsk2, tsk) {
84140+ if (likely(tsk != task)) {
84141+ // if this thread has the same subject as the one that triggered
84142+ // RES_CRASH and it's the same binary, kill it
84143+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
84144+ gr_fake_force_sig(SIGKILL, tsk);
84145+ }
84146+ } while_each_thread(tsk2, tsk);
84147+ read_unlock(&grsec_exec_file_lock);
84148+ read_unlock(&tasklist_lock);
84149+ }
84150+ rcu_read_unlock();
84151+ }
84152+
84153+ return;
84154+}
84155+
84156+int
84157+gr_check_crash_exec(const struct file *filp)
84158+{
84159+ struct acl_subject_label *curr;
84160+ struct dentry *dentry;
84161+
84162+ if (unlikely(!gr_acl_is_enabled()))
84163+ return 0;
84164+
84165+ read_lock(&gr_inode_lock);
84166+ dentry = filp->f_path.dentry;
84167+ curr = lookup_acl_subj_label(__get_ino(dentry), __get_dev(dentry),
84168+ current->role);
84169+ read_unlock(&gr_inode_lock);
84170+
84171+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
84172+ (!curr->crashes && !curr->expires))
84173+ return 0;
84174+
84175+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
84176+ time_after(curr->expires, get_seconds()))
84177+ return 1;
84178+ else if (time_before_eq(curr->expires, get_seconds())) {
84179+ curr->crashes = 0;
84180+ curr->expires = 0;
84181+ }
84182+
84183+ return 0;
84184+}
84185+
84186+void
84187+gr_handle_alertkill(struct task_struct *task)
84188+{
84189+ struct acl_subject_label *curracl;
84190+ __u32 curr_ip;
84191+ struct task_struct *p, *p2;
84192+
84193+ if (unlikely(!gr_acl_is_enabled()))
84194+ return;
84195+
84196+ curracl = task->acl;
84197+ curr_ip = task->signal->curr_ip;
84198+
84199+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
84200+ read_lock(&tasklist_lock);
84201+ do_each_thread(p2, p) {
84202+ if (p->signal->curr_ip == curr_ip)
84203+ gr_fake_force_sig(SIGKILL, p);
84204+ } while_each_thread(p2, p);
84205+ read_unlock(&tasklist_lock);
84206+ } else if (curracl->mode & GR_KILLPROC)
84207+ gr_fake_force_sig(SIGKILL, task);
84208+
84209+ return;
84210+}
84211diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
84212new file mode 100644
84213index 0000000..6b0c9cc
84214--- /dev/null
84215+++ b/grsecurity/gracl_shm.c
84216@@ -0,0 +1,40 @@
84217+#include <linux/kernel.h>
84218+#include <linux/mm.h>
84219+#include <linux/sched.h>
84220+#include <linux/file.h>
84221+#include <linux/ipc.h>
84222+#include <linux/gracl.h>
84223+#include <linux/grsecurity.h>
84224+#include <linux/grinternal.h>
84225+
84226+int
84227+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
84228+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
84229+{
84230+ struct task_struct *task;
84231+
84232+ if (!gr_acl_is_enabled())
84233+ return 1;
84234+
84235+ rcu_read_lock();
84236+ read_lock(&tasklist_lock);
84237+
84238+ task = find_task_by_vpid(shm_cprid);
84239+
84240+ if (unlikely(!task))
84241+ task = find_task_by_vpid(shm_lapid);
84242+
84243+ if (unlikely(task && (time_before_eq64(task->start_time, shm_createtime) ||
84244+ (task_pid_nr(task) == shm_lapid)) &&
84245+ (task->acl->mode & GR_PROTSHM) &&
84246+ (task->acl != current->acl))) {
84247+ read_unlock(&tasklist_lock);
84248+ rcu_read_unlock();
84249+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
84250+ return 0;
84251+ }
84252+ read_unlock(&tasklist_lock);
84253+ rcu_read_unlock();
84254+
84255+ return 1;
84256+}
84257diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
84258new file mode 100644
84259index 0000000..bc0be01
84260--- /dev/null
84261+++ b/grsecurity/grsec_chdir.c
84262@@ -0,0 +1,19 @@
84263+#include <linux/kernel.h>
84264+#include <linux/sched.h>
84265+#include <linux/fs.h>
84266+#include <linux/file.h>
84267+#include <linux/grsecurity.h>
84268+#include <linux/grinternal.h>
84269+
84270+void
84271+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
84272+{
84273+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
84274+ if ((grsec_enable_chdir && grsec_enable_group &&
84275+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
84276+ !grsec_enable_group)) {
84277+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
84278+ }
84279+#endif
84280+ return;
84281+}
84282diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
84283new file mode 100644
84284index 0000000..114ea4f
84285--- /dev/null
84286+++ b/grsecurity/grsec_chroot.c
84287@@ -0,0 +1,467 @@
84288+#include <linux/kernel.h>
84289+#include <linux/module.h>
84290+#include <linux/sched.h>
84291+#include <linux/file.h>
84292+#include <linux/fs.h>
84293+#include <linux/mount.h>
84294+#include <linux/types.h>
84295+#include "../fs/mount.h"
84296+#include <linux/grsecurity.h>
84297+#include <linux/grinternal.h>
84298+
84299+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
84300+int gr_init_ran;
84301+#endif
84302+
84303+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
84304+{
84305+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
84306+ struct dentry *tmpd = dentry;
84307+
84308+ read_seqlock_excl(&mount_lock);
84309+ write_seqlock(&rename_lock);
84310+
84311+ while (tmpd != mnt->mnt_root) {
84312+ atomic_inc(&tmpd->chroot_refcnt);
84313+ tmpd = tmpd->d_parent;
84314+ }
84315+ atomic_inc(&tmpd->chroot_refcnt);
84316+
84317+ write_sequnlock(&rename_lock);
84318+ read_sequnlock_excl(&mount_lock);
84319+#endif
84320+}
84321+
84322+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
84323+{
84324+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
84325+ struct dentry *tmpd = dentry;
84326+
84327+ read_seqlock_excl(&mount_lock);
84328+ write_seqlock(&rename_lock);
84329+
84330+ while (tmpd != mnt->mnt_root) {
84331+ atomic_dec(&tmpd->chroot_refcnt);
84332+ tmpd = tmpd->d_parent;
84333+ }
84334+ atomic_dec(&tmpd->chroot_refcnt);
84335+
84336+ write_sequnlock(&rename_lock);
84337+ read_sequnlock_excl(&mount_lock);
84338+#endif
84339+}
84340+
84341+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
84342+static struct dentry *get_closest_chroot(struct dentry *dentry)
84343+{
84344+ write_seqlock(&rename_lock);
84345+ do {
84346+ if (atomic_read(&dentry->chroot_refcnt)) {
84347+ write_sequnlock(&rename_lock);
84348+ return dentry;
84349+ }
84350+ dentry = dentry->d_parent;
84351+ } while (!IS_ROOT(dentry));
84352+ write_sequnlock(&rename_lock);
84353+ return NULL;
84354+}
84355+#endif
84356+
84357+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
84358+ struct dentry *newdentry, struct vfsmount *newmnt)
84359+{
84360+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
84361+ struct dentry *chroot;
84362+
84363+ if (unlikely(!grsec_enable_chroot_rename))
84364+ return 0;
84365+
84366+ if (likely(!proc_is_chrooted(current) && gr_is_global_root(current_uid())))
84367+ return 0;
84368+
84369+ chroot = get_closest_chroot(olddentry);
84370+
84371+ if (chroot == NULL)
84372+ return 0;
84373+
84374+ if (is_subdir(newdentry, chroot))
84375+ return 0;
84376+
84377+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_RENAME_MSG, olddentry, oldmnt);
84378+
84379+ return 1;
84380+#else
84381+ return 0;
84382+#endif
84383+}
84384+
84385+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
84386+{
84387+#ifdef CONFIG_GRKERNSEC
84388+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
84389+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
84390+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
84391+ && gr_init_ran
84392+#endif
84393+ )
84394+ task->gr_is_chrooted = 1;
84395+ else {
84396+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
84397+ if (task_pid_nr(task) == 1 && !gr_init_ran)
84398+ gr_init_ran = 1;
84399+#endif
84400+ task->gr_is_chrooted = 0;
84401+ }
84402+
84403+ task->gr_chroot_dentry = path->dentry;
84404+#endif
84405+ return;
84406+}
84407+
84408+void gr_clear_chroot_entries(struct task_struct *task)
84409+{
84410+#ifdef CONFIG_GRKERNSEC
84411+ task->gr_is_chrooted = 0;
84412+ task->gr_chroot_dentry = NULL;
84413+#endif
84414+ return;
84415+}
84416+
84417+int
84418+gr_handle_chroot_unix(const pid_t pid)
84419+{
84420+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
84421+ struct task_struct *p;
84422+
84423+ if (unlikely(!grsec_enable_chroot_unix))
84424+ return 1;
84425+
84426+ if (likely(!proc_is_chrooted(current)))
84427+ return 1;
84428+
84429+ rcu_read_lock();
84430+ read_lock(&tasklist_lock);
84431+ p = find_task_by_vpid_unrestricted(pid);
84432+ if (unlikely(p && !have_same_root(current, p))) {
84433+ read_unlock(&tasklist_lock);
84434+ rcu_read_unlock();
84435+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
84436+ return 0;
84437+ }
84438+ read_unlock(&tasklist_lock);
84439+ rcu_read_unlock();
84440+#endif
84441+ return 1;
84442+}
84443+
84444+int
84445+gr_handle_chroot_nice(void)
84446+{
84447+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
84448+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
84449+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
84450+ return -EPERM;
84451+ }
84452+#endif
84453+ return 0;
84454+}
84455+
84456+int
84457+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
84458+{
84459+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
84460+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
84461+ && proc_is_chrooted(current)) {
84462+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
84463+ return -EACCES;
84464+ }
84465+#endif
84466+ return 0;
84467+}
84468+
84469+int
84470+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
84471+{
84472+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
84473+ struct task_struct *p;
84474+ int ret = 0;
84475+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
84476+ return ret;
84477+
84478+ read_lock(&tasklist_lock);
84479+ do_each_pid_task(pid, type, p) {
84480+ if (!have_same_root(current, p)) {
84481+ ret = 1;
84482+ goto out;
84483+ }
84484+ } while_each_pid_task(pid, type, p);
84485+out:
84486+ read_unlock(&tasklist_lock);
84487+ return ret;
84488+#endif
84489+ return 0;
84490+}
84491+
84492+int
84493+gr_pid_is_chrooted(struct task_struct *p)
84494+{
84495+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
84496+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
84497+ return 0;
84498+
84499+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
84500+ !have_same_root(current, p)) {
84501+ return 1;
84502+ }
84503+#endif
84504+ return 0;
84505+}
84506+
84507+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
84508+
84509+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
84510+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
84511+{
84512+ struct path path, currentroot;
84513+ int ret = 0;
84514+
84515+ path.dentry = (struct dentry *)u_dentry;
84516+ path.mnt = (struct vfsmount *)u_mnt;
84517+ get_fs_root(current->fs, &currentroot);
84518+ if (path_is_under(&path, &currentroot))
84519+ ret = 1;
84520+ path_put(&currentroot);
84521+
84522+ return ret;
84523+}
84524+#endif
84525+
84526+int
84527+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
84528+{
84529+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
84530+ if (!grsec_enable_chroot_fchdir)
84531+ return 1;
84532+
84533+ if (!proc_is_chrooted(current))
84534+ return 1;
84535+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
84536+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
84537+ return 0;
84538+ }
84539+#endif
84540+ return 1;
84541+}
84542+
84543+int
84544+gr_chroot_fhandle(void)
84545+{
84546+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
84547+ if (!grsec_enable_chroot_fchdir)
84548+ return 1;
84549+
84550+ if (!proc_is_chrooted(current))
84551+ return 1;
84552+ else {
84553+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
84554+ return 0;
84555+ }
84556+#endif
84557+ return 1;
84558+}
84559+
84560+int
84561+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
84562+ const u64 shm_createtime)
84563+{
84564+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
84565+ struct task_struct *p;
84566+
84567+ if (unlikely(!grsec_enable_chroot_shmat))
84568+ return 1;
84569+
84570+ if (likely(!proc_is_chrooted(current)))
84571+ return 1;
84572+
84573+ rcu_read_lock();
84574+ read_lock(&tasklist_lock);
84575+
84576+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
84577+ if (time_before_eq64(p->start_time, shm_createtime)) {
84578+ if (have_same_root(current, p)) {
84579+ goto allow;
84580+ } else {
84581+ read_unlock(&tasklist_lock);
84582+ rcu_read_unlock();
84583+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
84584+ return 0;
84585+ }
84586+ }
84587+ /* creator exited, pid reuse, fall through to next check */
84588+ }
84589+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
84590+ if (unlikely(!have_same_root(current, p))) {
84591+ read_unlock(&tasklist_lock);
84592+ rcu_read_unlock();
84593+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
84594+ return 0;
84595+ }
84596+ }
84597+
84598+allow:
84599+ read_unlock(&tasklist_lock);
84600+ rcu_read_unlock();
84601+#endif
84602+ return 1;
84603+}
84604+
84605+void
84606+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
84607+{
84608+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
84609+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
84610+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
84611+#endif
84612+ return;
84613+}
84614+
84615+int
84616+gr_handle_chroot_mknod(const struct dentry *dentry,
84617+ const struct vfsmount *mnt, const int mode)
84618+{
84619+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
84620+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
84621+ proc_is_chrooted(current)) {
84622+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
84623+ return -EPERM;
84624+ }
84625+#endif
84626+ return 0;
84627+}
84628+
84629+int
84630+gr_handle_chroot_mount(const struct dentry *dentry,
84631+ const struct vfsmount *mnt, const char *dev_name)
84632+{
84633+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
84634+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
84635+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
84636+ return -EPERM;
84637+ }
84638+#endif
84639+ return 0;
84640+}
84641+
84642+int
84643+gr_handle_chroot_pivot(void)
84644+{
84645+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
84646+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
84647+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
84648+ return -EPERM;
84649+ }
84650+#endif
84651+ return 0;
84652+}
84653+
84654+int
84655+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
84656+{
84657+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
84658+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
84659+ !gr_is_outside_chroot(dentry, mnt)) {
84660+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
84661+ return -EPERM;
84662+ }
84663+#endif
84664+ return 0;
84665+}
84666+
84667+extern const char *captab_log[];
84668+extern int captab_log_entries;
84669+
84670+int
84671+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
84672+{
84673+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
84674+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
84675+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
84676+ if (cap_raised(chroot_caps, cap)) {
84677+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
84678+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
84679+ }
84680+ return 0;
84681+ }
84682+ }
84683+#endif
84684+ return 1;
84685+}
84686+
84687+int
84688+gr_chroot_is_capable(const int cap)
84689+{
84690+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
84691+ return gr_task_chroot_is_capable(current, current_cred(), cap);
84692+#endif
84693+ return 1;
84694+}
84695+
84696+int
84697+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
84698+{
84699+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
84700+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
84701+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
84702+ if (cap_raised(chroot_caps, cap)) {
84703+ return 0;
84704+ }
84705+ }
84706+#endif
84707+ return 1;
84708+}
84709+
84710+int
84711+gr_chroot_is_capable_nolog(const int cap)
84712+{
84713+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
84714+ return gr_task_chroot_is_capable_nolog(current, cap);
84715+#endif
84716+ return 1;
84717+}
84718+
84719+int
84720+gr_handle_chroot_sysctl(const int op)
84721+{
84722+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
84723+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
84724+ proc_is_chrooted(current))
84725+ return -EACCES;
84726+#endif
84727+ return 0;
84728+}
84729+
84730+void
84731+gr_handle_chroot_chdir(const struct path *path)
84732+{
84733+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
84734+ if (grsec_enable_chroot_chdir)
84735+ set_fs_pwd(current->fs, path);
84736+#endif
84737+ return;
84738+}
84739+
84740+int
84741+gr_handle_chroot_chmod(const struct dentry *dentry,
84742+ const struct vfsmount *mnt, const int mode)
84743+{
84744+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
84745+ /* allow chmod +s on directories, but not files */
84746+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
84747+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
84748+ proc_is_chrooted(current)) {
84749+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
84750+ return -EPERM;
84751+ }
84752+#endif
84753+ return 0;
84754+}
84755diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
84756new file mode 100644
84757index 0000000..946f750
84758--- /dev/null
84759+++ b/grsecurity/grsec_disabled.c
84760@@ -0,0 +1,445 @@
84761+#include <linux/kernel.h>
84762+#include <linux/module.h>
84763+#include <linux/sched.h>
84764+#include <linux/file.h>
84765+#include <linux/fs.h>
84766+#include <linux/kdev_t.h>
84767+#include <linux/net.h>
84768+#include <linux/in.h>
84769+#include <linux/ip.h>
84770+#include <linux/skbuff.h>
84771+#include <linux/sysctl.h>
84772+
84773+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
84774+void
84775+pax_set_initial_flags(struct linux_binprm *bprm)
84776+{
84777+ return;
84778+}
84779+#endif
84780+
84781+#ifdef CONFIG_SYSCTL
84782+__u32
84783+gr_handle_sysctl(const struct ctl_table * table, const int op)
84784+{
84785+ return 0;
84786+}
84787+#endif
84788+
84789+#ifdef CONFIG_TASKSTATS
84790+int gr_is_taskstats_denied(int pid)
84791+{
84792+ return 0;
84793+}
84794+#endif
84795+
84796+int
84797+gr_acl_is_enabled(void)
84798+{
84799+ return 0;
84800+}
84801+
84802+int
84803+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
84804+{
84805+ return 0;
84806+}
84807+
84808+void
84809+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
84810+{
84811+ return;
84812+}
84813+
84814+int
84815+gr_handle_rawio(const struct inode *inode)
84816+{
84817+ return 0;
84818+}
84819+
84820+void
84821+gr_acl_handle_psacct(struct task_struct *task, const long code)
84822+{
84823+ return;
84824+}
84825+
84826+int
84827+gr_handle_ptrace(struct task_struct *task, const long request)
84828+{
84829+ return 0;
84830+}
84831+
84832+int
84833+gr_handle_proc_ptrace(struct task_struct *task)
84834+{
84835+ return 0;
84836+}
84837+
84838+int
84839+gr_set_acls(const int type)
84840+{
84841+ return 0;
84842+}
84843+
84844+int
84845+gr_check_hidden_task(const struct task_struct *tsk)
84846+{
84847+ return 0;
84848+}
84849+
84850+int
84851+gr_check_protected_task(const struct task_struct *task)
84852+{
84853+ return 0;
84854+}
84855+
84856+int
84857+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
84858+{
84859+ return 0;
84860+}
84861+
84862+void
84863+gr_copy_label(struct task_struct *tsk)
84864+{
84865+ return;
84866+}
84867+
84868+void
84869+gr_set_pax_flags(struct task_struct *task)
84870+{
84871+ return;
84872+}
84873+
84874+int
84875+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
84876+ const int unsafe_share)
84877+{
84878+ return 0;
84879+}
84880+
84881+void
84882+gr_handle_delete(const u64 ino, const dev_t dev)
84883+{
84884+ return;
84885+}
84886+
84887+void
84888+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
84889+{
84890+ return;
84891+}
84892+
84893+void
84894+gr_handle_crash(struct task_struct *task, const int sig)
84895+{
84896+ return;
84897+}
84898+
84899+int
84900+gr_check_crash_exec(const struct file *filp)
84901+{
84902+ return 0;
84903+}
84904+
84905+int
84906+gr_check_crash_uid(const kuid_t uid)
84907+{
84908+ return 0;
84909+}
84910+
84911+void
84912+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
84913+ struct dentry *old_dentry,
84914+ struct dentry *new_dentry,
84915+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
84916+{
84917+ return;
84918+}
84919+
84920+int
84921+gr_search_socket(const int family, const int type, const int protocol)
84922+{
84923+ return 1;
84924+}
84925+
84926+int
84927+gr_search_connectbind(const int mode, const struct socket *sock,
84928+ const struct sockaddr_in *addr)
84929+{
84930+ return 0;
84931+}
84932+
84933+void
84934+gr_handle_alertkill(struct task_struct *task)
84935+{
84936+ return;
84937+}
84938+
84939+__u32
84940+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
84941+{
84942+ return 1;
84943+}
84944+
84945+__u32
84946+gr_acl_handle_hidden_file(const struct dentry * dentry,
84947+ const struct vfsmount * mnt)
84948+{
84949+ return 1;
84950+}
84951+
84952+__u32
84953+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
84954+ int acc_mode)
84955+{
84956+ return 1;
84957+}
84958+
84959+__u32
84960+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
84961+{
84962+ return 1;
84963+}
84964+
84965+__u32
84966+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
84967+{
84968+ return 1;
84969+}
84970+
84971+int
84972+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
84973+ unsigned int *vm_flags)
84974+{
84975+ return 1;
84976+}
84977+
84978+__u32
84979+gr_acl_handle_truncate(const struct dentry * dentry,
84980+ const struct vfsmount * mnt)
84981+{
84982+ return 1;
84983+}
84984+
84985+__u32
84986+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
84987+{
84988+ return 1;
84989+}
84990+
84991+__u32
84992+gr_acl_handle_access(const struct dentry * dentry,
84993+ const struct vfsmount * mnt, const int fmode)
84994+{
84995+ return 1;
84996+}
84997+
84998+__u32
84999+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
85000+ umode_t *mode)
85001+{
85002+ return 1;
85003+}
85004+
85005+__u32
85006+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
85007+{
85008+ return 1;
85009+}
85010+
85011+__u32
85012+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
85013+{
85014+ return 1;
85015+}
85016+
85017+__u32
85018+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
85019+{
85020+ return 1;
85021+}
85022+
85023+void
85024+grsecurity_init(void)
85025+{
85026+ return;
85027+}
85028+
85029+umode_t gr_acl_umask(void)
85030+{
85031+ return 0;
85032+}
85033+
85034+__u32
85035+gr_acl_handle_mknod(const struct dentry * new_dentry,
85036+ const struct dentry * parent_dentry,
85037+ const struct vfsmount * parent_mnt,
85038+ const int mode)
85039+{
85040+ return 1;
85041+}
85042+
85043+__u32
85044+gr_acl_handle_mkdir(const struct dentry * new_dentry,
85045+ const struct dentry * parent_dentry,
85046+ const struct vfsmount * parent_mnt)
85047+{
85048+ return 1;
85049+}
85050+
85051+__u32
85052+gr_acl_handle_symlink(const struct dentry * new_dentry,
85053+ const struct dentry * parent_dentry,
85054+ const struct vfsmount * parent_mnt, const struct filename *from)
85055+{
85056+ return 1;
85057+}
85058+
85059+__u32
85060+gr_acl_handle_link(const struct dentry * new_dentry,
85061+ const struct dentry * parent_dentry,
85062+ const struct vfsmount * parent_mnt,
85063+ const struct dentry * old_dentry,
85064+ const struct vfsmount * old_mnt, const struct filename *to)
85065+{
85066+ return 1;
85067+}
85068+
85069+int
85070+gr_acl_handle_rename(const struct dentry *new_dentry,
85071+ const struct dentry *parent_dentry,
85072+ const struct vfsmount *parent_mnt,
85073+ const struct dentry *old_dentry,
85074+ const struct inode *old_parent_inode,
85075+ const struct vfsmount *old_mnt, const struct filename *newname,
85076+ unsigned int flags)
85077+{
85078+ return 0;
85079+}
85080+
85081+int
85082+gr_acl_handle_filldir(const struct file *file, const char *name,
85083+ const int namelen, const u64 ino)
85084+{
85085+ return 1;
85086+}
85087+
85088+int
85089+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
85090+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
85091+{
85092+ return 1;
85093+}
85094+
85095+int
85096+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
85097+{
85098+ return 0;
85099+}
85100+
85101+int
85102+gr_search_accept(const struct socket *sock)
85103+{
85104+ return 0;
85105+}
85106+
85107+int
85108+gr_search_listen(const struct socket *sock)
85109+{
85110+ return 0;
85111+}
85112+
85113+int
85114+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
85115+{
85116+ return 0;
85117+}
85118+
85119+__u32
85120+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
85121+{
85122+ return 1;
85123+}
85124+
85125+__u32
85126+gr_acl_handle_creat(const struct dentry * dentry,
85127+ const struct dentry * p_dentry,
85128+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
85129+ const int imode)
85130+{
85131+ return 1;
85132+}
85133+
85134+void
85135+gr_acl_handle_exit(void)
85136+{
85137+ return;
85138+}
85139+
85140+int
85141+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
85142+{
85143+ return 1;
85144+}
85145+
85146+void
85147+gr_set_role_label(const kuid_t uid, const kgid_t gid)
85148+{
85149+ return;
85150+}
85151+
85152+int
85153+gr_acl_handle_procpidmem(const struct task_struct *task)
85154+{
85155+ return 0;
85156+}
85157+
85158+int
85159+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
85160+{
85161+ return 0;
85162+}
85163+
85164+int
85165+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
85166+{
85167+ return 0;
85168+}
85169+
85170+int
85171+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
85172+{
85173+ return 0;
85174+}
85175+
85176+int
85177+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
85178+{
85179+ return 0;
85180+}
85181+
85182+int gr_acl_enable_at_secure(void)
85183+{
85184+ return 0;
85185+}
85186+
85187+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
85188+{
85189+ return dentry->d_sb->s_dev;
85190+}
85191+
85192+u64 gr_get_ino_from_dentry(struct dentry *dentry)
85193+{
85194+ return dentry->d_inode->i_ino;
85195+}
85196+
85197+void gr_put_exec_file(struct task_struct *task)
85198+{
85199+ return;
85200+}
85201+
85202+#ifdef CONFIG_SECURITY
85203+EXPORT_SYMBOL_GPL(gr_check_user_change);
85204+EXPORT_SYMBOL_GPL(gr_check_group_change);
85205+#endif
85206diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
85207new file mode 100644
85208index 0000000..fb7531e
85209--- /dev/null
85210+++ b/grsecurity/grsec_exec.c
85211@@ -0,0 +1,189 @@
85212+#include <linux/kernel.h>
85213+#include <linux/sched.h>
85214+#include <linux/file.h>
85215+#include <linux/binfmts.h>
85216+#include <linux/fs.h>
85217+#include <linux/types.h>
85218+#include <linux/grdefs.h>
85219+#include <linux/grsecurity.h>
85220+#include <linux/grinternal.h>
85221+#include <linux/capability.h>
85222+#include <linux/module.h>
85223+#include <linux/compat.h>
85224+
85225+#include <asm/uaccess.h>
85226+
85227+#ifdef CONFIG_GRKERNSEC_EXECLOG
85228+static char gr_exec_arg_buf[132];
85229+static DEFINE_MUTEX(gr_exec_arg_mutex);
85230+#endif
85231+
85232+struct user_arg_ptr {
85233+#ifdef CONFIG_COMPAT
85234+ bool is_compat;
85235+#endif
85236+ union {
85237+ const char __user *const __user *native;
85238+#ifdef CONFIG_COMPAT
85239+ const compat_uptr_t __user *compat;
85240+#endif
85241+ } ptr;
85242+};
85243+
85244+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
85245+
85246+void
85247+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
85248+{
85249+#ifdef CONFIG_GRKERNSEC_EXECLOG
85250+ char *grarg = gr_exec_arg_buf;
85251+ unsigned int i, x, execlen = 0;
85252+ char c;
85253+
85254+ if (!((grsec_enable_execlog && grsec_enable_group &&
85255+ in_group_p(grsec_audit_gid))
85256+ || (grsec_enable_execlog && !grsec_enable_group)))
85257+ return;
85258+
85259+ mutex_lock(&gr_exec_arg_mutex);
85260+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
85261+
85262+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
85263+ const char __user *p;
85264+ unsigned int len;
85265+
85266+ p = get_user_arg_ptr(argv, i);
85267+ if (IS_ERR(p))
85268+ goto log;
85269+
85270+ len = strnlen_user(p, 128 - execlen);
85271+ if (len > 128 - execlen)
85272+ len = 128 - execlen;
85273+ else if (len > 0)
85274+ len--;
85275+ if (copy_from_user(grarg + execlen, p, len))
85276+ goto log;
85277+
85278+ /* rewrite unprintable characters */
85279+ for (x = 0; x < len; x++) {
85280+ c = *(grarg + execlen + x);
85281+ if (c < 32 || c > 126)
85282+ *(grarg + execlen + x) = ' ';
85283+ }
85284+
85285+ execlen += len;
85286+ *(grarg + execlen) = ' ';
85287+ *(grarg + execlen + 1) = '\0';
85288+ execlen++;
85289+ }
85290+
85291+ log:
85292+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
85293+ bprm->file->f_path.mnt, grarg);
85294+ mutex_unlock(&gr_exec_arg_mutex);
85295+#endif
85296+ return;
85297+}
85298+
85299+#ifdef CONFIG_GRKERNSEC
85300+extern int gr_acl_is_capable(const int cap);
85301+extern int gr_acl_is_capable_nolog(const int cap);
85302+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
85303+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
85304+extern int gr_chroot_is_capable(const int cap);
85305+extern int gr_chroot_is_capable_nolog(const int cap);
85306+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
85307+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
85308+#endif
85309+
85310+const char *captab_log[] = {
85311+ "CAP_CHOWN",
85312+ "CAP_DAC_OVERRIDE",
85313+ "CAP_DAC_READ_SEARCH",
85314+ "CAP_FOWNER",
85315+ "CAP_FSETID",
85316+ "CAP_KILL",
85317+ "CAP_SETGID",
85318+ "CAP_SETUID",
85319+ "CAP_SETPCAP",
85320+ "CAP_LINUX_IMMUTABLE",
85321+ "CAP_NET_BIND_SERVICE",
85322+ "CAP_NET_BROADCAST",
85323+ "CAP_NET_ADMIN",
85324+ "CAP_NET_RAW",
85325+ "CAP_IPC_LOCK",
85326+ "CAP_IPC_OWNER",
85327+ "CAP_SYS_MODULE",
85328+ "CAP_SYS_RAWIO",
85329+ "CAP_SYS_CHROOT",
85330+ "CAP_SYS_PTRACE",
85331+ "CAP_SYS_PACCT",
85332+ "CAP_SYS_ADMIN",
85333+ "CAP_SYS_BOOT",
85334+ "CAP_SYS_NICE",
85335+ "CAP_SYS_RESOURCE",
85336+ "CAP_SYS_TIME",
85337+ "CAP_SYS_TTY_CONFIG",
85338+ "CAP_MKNOD",
85339+ "CAP_LEASE",
85340+ "CAP_AUDIT_WRITE",
85341+ "CAP_AUDIT_CONTROL",
85342+ "CAP_SETFCAP",
85343+ "CAP_MAC_OVERRIDE",
85344+ "CAP_MAC_ADMIN",
85345+ "CAP_SYSLOG",
85346+ "CAP_WAKE_ALARM",
85347+ "CAP_BLOCK_SUSPEND",
85348+ "CAP_AUDIT_READ"
85349+};
85350+
85351+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
85352+
85353+int gr_is_capable(const int cap)
85354+{
85355+#ifdef CONFIG_GRKERNSEC
85356+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
85357+ return 1;
85358+ return 0;
85359+#else
85360+ return 1;
85361+#endif
85362+}
85363+
85364+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
85365+{
85366+#ifdef CONFIG_GRKERNSEC
85367+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
85368+ return 1;
85369+ return 0;
85370+#else
85371+ return 1;
85372+#endif
85373+}
85374+
85375+int gr_is_capable_nolog(const int cap)
85376+{
85377+#ifdef CONFIG_GRKERNSEC
85378+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
85379+ return 1;
85380+ return 0;
85381+#else
85382+ return 1;
85383+#endif
85384+}
85385+
85386+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
85387+{
85388+#ifdef CONFIG_GRKERNSEC
85389+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
85390+ return 1;
85391+ return 0;
85392+#else
85393+ return 1;
85394+#endif
85395+}
85396+
85397+EXPORT_SYMBOL_GPL(gr_is_capable);
85398+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
85399+EXPORT_SYMBOL_GPL(gr_task_is_capable);
85400+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
85401diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
85402new file mode 100644
85403index 0000000..06cc6ea
85404--- /dev/null
85405+++ b/grsecurity/grsec_fifo.c
85406@@ -0,0 +1,24 @@
85407+#include <linux/kernel.h>
85408+#include <linux/sched.h>
85409+#include <linux/fs.h>
85410+#include <linux/file.h>
85411+#include <linux/grinternal.h>
85412+
85413+int
85414+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
85415+ const struct dentry *dir, const int flag, const int acc_mode)
85416+{
85417+#ifdef CONFIG_GRKERNSEC_FIFO
85418+ const struct cred *cred = current_cred();
85419+
85420+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
85421+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
85422+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
85423+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
85424+ if (!inode_permission(dentry->d_inode, acc_mode))
85425+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
85426+ return -EACCES;
85427+ }
85428+#endif
85429+ return 0;
85430+}
85431diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
85432new file mode 100644
85433index 0000000..8ca18bf
85434--- /dev/null
85435+++ b/grsecurity/grsec_fork.c
85436@@ -0,0 +1,23 @@
85437+#include <linux/kernel.h>
85438+#include <linux/sched.h>
85439+#include <linux/grsecurity.h>
85440+#include <linux/grinternal.h>
85441+#include <linux/errno.h>
85442+
85443+void
85444+gr_log_forkfail(const int retval)
85445+{
85446+#ifdef CONFIG_GRKERNSEC_FORKFAIL
85447+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
85448+ switch (retval) {
85449+ case -EAGAIN:
85450+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
85451+ break;
85452+ case -ENOMEM:
85453+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
85454+ break;
85455+ }
85456+ }
85457+#endif
85458+ return;
85459+}
85460diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
85461new file mode 100644
85462index 0000000..4ed9e7d
85463--- /dev/null
85464+++ b/grsecurity/grsec_init.c
85465@@ -0,0 +1,290 @@
85466+#include <linux/kernel.h>
85467+#include <linux/sched.h>
85468+#include <linux/mm.h>
85469+#include <linux/gracl.h>
85470+#include <linux/slab.h>
85471+#include <linux/vmalloc.h>
85472+#include <linux/percpu.h>
85473+#include <linux/module.h>
85474+
85475+int grsec_enable_ptrace_readexec;
85476+int grsec_enable_setxid;
85477+int grsec_enable_symlinkown;
85478+kgid_t grsec_symlinkown_gid;
85479+int grsec_enable_brute;
85480+int grsec_enable_link;
85481+int grsec_enable_dmesg;
85482+int grsec_enable_harden_ptrace;
85483+int grsec_enable_harden_ipc;
85484+int grsec_enable_fifo;
85485+int grsec_enable_execlog;
85486+int grsec_enable_signal;
85487+int grsec_enable_forkfail;
85488+int grsec_enable_audit_ptrace;
85489+int grsec_enable_time;
85490+int grsec_enable_group;
85491+kgid_t grsec_audit_gid;
85492+int grsec_enable_chdir;
85493+int grsec_enable_mount;
85494+int grsec_enable_rofs;
85495+int grsec_deny_new_usb;
85496+int grsec_enable_chroot_findtask;
85497+int grsec_enable_chroot_mount;
85498+int grsec_enable_chroot_shmat;
85499+int grsec_enable_chroot_fchdir;
85500+int grsec_enable_chroot_double;
85501+int grsec_enable_chroot_pivot;
85502+int grsec_enable_chroot_chdir;
85503+int grsec_enable_chroot_chmod;
85504+int grsec_enable_chroot_mknod;
85505+int grsec_enable_chroot_nice;
85506+int grsec_enable_chroot_execlog;
85507+int grsec_enable_chroot_caps;
85508+int grsec_enable_chroot_rename;
85509+int grsec_enable_chroot_sysctl;
85510+int grsec_enable_chroot_unix;
85511+int grsec_enable_tpe;
85512+kgid_t grsec_tpe_gid;
85513+int grsec_enable_blackhole;
85514+#ifdef CONFIG_IPV6_MODULE
85515+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
85516+#endif
85517+int grsec_lastack_retries;
85518+int grsec_enable_tpe_all;
85519+int grsec_enable_tpe_invert;
85520+int grsec_enable_socket_all;
85521+kgid_t grsec_socket_all_gid;
85522+int grsec_enable_socket_client;
85523+kgid_t grsec_socket_client_gid;
85524+int grsec_enable_socket_server;
85525+kgid_t grsec_socket_server_gid;
85526+int grsec_resource_logging;
85527+int grsec_disable_privio;
85528+int grsec_enable_log_rwxmaps;
85529+int grsec_lock;
85530+
85531+DEFINE_SPINLOCK(grsec_alert_lock);
85532+unsigned long grsec_alert_wtime = 0;
85533+unsigned long grsec_alert_fyet = 0;
85534+
85535+DEFINE_SPINLOCK(grsec_audit_lock);
85536+
85537+DEFINE_RWLOCK(grsec_exec_file_lock);
85538+
85539+char *gr_shared_page[4];
85540+
85541+char *gr_alert_log_fmt;
85542+char *gr_audit_log_fmt;
85543+char *gr_alert_log_buf;
85544+char *gr_audit_log_buf;
85545+
85546+extern struct gr_arg *gr_usermode;
85547+extern unsigned char *gr_system_salt;
85548+extern unsigned char *gr_system_sum;
85549+
85550+void __init
85551+grsecurity_init(void)
85552+{
85553+ int j;
85554+ /* create the per-cpu shared pages */
85555+
85556+#ifdef CONFIG_X86
85557+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
85558+#endif
85559+
85560+ for (j = 0; j < 4; j++) {
85561+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
85562+ if (gr_shared_page[j] == NULL) {
85563+ panic("Unable to allocate grsecurity shared page");
85564+ return;
85565+ }
85566+ }
85567+
85568+ /* allocate log buffers */
85569+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
85570+ if (!gr_alert_log_fmt) {
85571+ panic("Unable to allocate grsecurity alert log format buffer");
85572+ return;
85573+ }
85574+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
85575+ if (!gr_audit_log_fmt) {
85576+ panic("Unable to allocate grsecurity audit log format buffer");
85577+ return;
85578+ }
85579+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
85580+ if (!gr_alert_log_buf) {
85581+ panic("Unable to allocate grsecurity alert log buffer");
85582+ return;
85583+ }
85584+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
85585+ if (!gr_audit_log_buf) {
85586+ panic("Unable to allocate grsecurity audit log buffer");
85587+ return;
85588+ }
85589+
85590+ /* allocate memory for authentication structure */
85591+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
85592+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
85593+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
85594+
85595+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
85596+ panic("Unable to allocate grsecurity authentication structure");
85597+ return;
85598+ }
85599+
85600+#ifdef CONFIG_GRKERNSEC_IO
85601+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
85602+ grsec_disable_privio = 1;
85603+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
85604+ grsec_disable_privio = 1;
85605+#else
85606+ grsec_disable_privio = 0;
85607+#endif
85608+#endif
85609+
85610+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
85611+ /* for backward compatibility, tpe_invert always defaults to on if
85612+ enabled in the kernel
85613+ */
85614+ grsec_enable_tpe_invert = 1;
85615+#endif
85616+
85617+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
85618+#ifndef CONFIG_GRKERNSEC_SYSCTL
85619+ grsec_lock = 1;
85620+#endif
85621+
85622+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
85623+ grsec_enable_log_rwxmaps = 1;
85624+#endif
85625+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
85626+ grsec_enable_group = 1;
85627+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
85628+#endif
85629+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
85630+ grsec_enable_ptrace_readexec = 1;
85631+#endif
85632+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
85633+ grsec_enable_chdir = 1;
85634+#endif
85635+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
85636+ grsec_enable_harden_ptrace = 1;
85637+#endif
85638+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
85639+ grsec_enable_harden_ipc = 1;
85640+#endif
85641+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
85642+ grsec_enable_mount = 1;
85643+#endif
85644+#ifdef CONFIG_GRKERNSEC_LINK
85645+ grsec_enable_link = 1;
85646+#endif
85647+#ifdef CONFIG_GRKERNSEC_BRUTE
85648+ grsec_enable_brute = 1;
85649+#endif
85650+#ifdef CONFIG_GRKERNSEC_DMESG
85651+ grsec_enable_dmesg = 1;
85652+#endif
85653+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85654+ grsec_enable_blackhole = 1;
85655+ grsec_lastack_retries = 4;
85656+#endif
85657+#ifdef CONFIG_GRKERNSEC_FIFO
85658+ grsec_enable_fifo = 1;
85659+#endif
85660+#ifdef CONFIG_GRKERNSEC_EXECLOG
85661+ grsec_enable_execlog = 1;
85662+#endif
85663+#ifdef CONFIG_GRKERNSEC_SETXID
85664+ grsec_enable_setxid = 1;
85665+#endif
85666+#ifdef CONFIG_GRKERNSEC_SIGNAL
85667+ grsec_enable_signal = 1;
85668+#endif
85669+#ifdef CONFIG_GRKERNSEC_FORKFAIL
85670+ grsec_enable_forkfail = 1;
85671+#endif
85672+#ifdef CONFIG_GRKERNSEC_TIME
85673+ grsec_enable_time = 1;
85674+#endif
85675+#ifdef CONFIG_GRKERNSEC_RESLOG
85676+ grsec_resource_logging = 1;
85677+#endif
85678+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
85679+ grsec_enable_chroot_findtask = 1;
85680+#endif
85681+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
85682+ grsec_enable_chroot_unix = 1;
85683+#endif
85684+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
85685+ grsec_enable_chroot_mount = 1;
85686+#endif
85687+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
85688+ grsec_enable_chroot_fchdir = 1;
85689+#endif
85690+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
85691+ grsec_enable_chroot_shmat = 1;
85692+#endif
85693+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
85694+ grsec_enable_audit_ptrace = 1;
85695+#endif
85696+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
85697+ grsec_enable_chroot_double = 1;
85698+#endif
85699+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
85700+ grsec_enable_chroot_pivot = 1;
85701+#endif
85702+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
85703+ grsec_enable_chroot_chdir = 1;
85704+#endif
85705+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
85706+ grsec_enable_chroot_chmod = 1;
85707+#endif
85708+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
85709+ grsec_enable_chroot_mknod = 1;
85710+#endif
85711+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
85712+ grsec_enable_chroot_nice = 1;
85713+#endif
85714+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
85715+ grsec_enable_chroot_execlog = 1;
85716+#endif
85717+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
85718+ grsec_enable_chroot_caps = 1;
85719+#endif
85720+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
85721+ grsec_enable_chroot_rename = 1;
85722+#endif
85723+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
85724+ grsec_enable_chroot_sysctl = 1;
85725+#endif
85726+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
85727+ grsec_enable_symlinkown = 1;
85728+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
85729+#endif
85730+#ifdef CONFIG_GRKERNSEC_TPE
85731+ grsec_enable_tpe = 1;
85732+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
85733+#ifdef CONFIG_GRKERNSEC_TPE_ALL
85734+ grsec_enable_tpe_all = 1;
85735+#endif
85736+#endif
85737+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
85738+ grsec_enable_socket_all = 1;
85739+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
85740+#endif
85741+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
85742+ grsec_enable_socket_client = 1;
85743+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
85744+#endif
85745+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
85746+ grsec_enable_socket_server = 1;
85747+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
85748+#endif
85749+#endif
85750+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
85751+ grsec_deny_new_usb = 1;
85752+#endif
85753+
85754+ return;
85755+}
85756diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
85757new file mode 100644
85758index 0000000..1773300
85759--- /dev/null
85760+++ b/grsecurity/grsec_ipc.c
85761@@ -0,0 +1,48 @@
85762+#include <linux/kernel.h>
85763+#include <linux/mm.h>
85764+#include <linux/sched.h>
85765+#include <linux/file.h>
85766+#include <linux/ipc.h>
85767+#include <linux/ipc_namespace.h>
85768+#include <linux/grsecurity.h>
85769+#include <linux/grinternal.h>
85770+
85771+int
85772+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
85773+{
85774+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
85775+ int write;
85776+ int orig_granted_mode;
85777+ kuid_t euid;
85778+ kgid_t egid;
85779+
85780+ if (!grsec_enable_harden_ipc)
85781+ return 1;
85782+
85783+ euid = current_euid();
85784+ egid = current_egid();
85785+
85786+ write = requested_mode & 00002;
85787+ orig_granted_mode = ipcp->mode;
85788+
85789+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
85790+ orig_granted_mode >>= 6;
85791+ else {
85792+ /* if likely wrong permissions, lock to user */
85793+ if (orig_granted_mode & 0007)
85794+ orig_granted_mode = 0;
85795+ /* otherwise do a egid-only check */
85796+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
85797+ orig_granted_mode >>= 3;
85798+ /* otherwise, no access */
85799+ else
85800+ orig_granted_mode = 0;
85801+ }
85802+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
85803+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
85804+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
85805+ return 0;
85806+ }
85807+#endif
85808+ return 1;
85809+}
85810diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
85811new file mode 100644
85812index 0000000..5e05e20
85813--- /dev/null
85814+++ b/grsecurity/grsec_link.c
85815@@ -0,0 +1,58 @@
85816+#include <linux/kernel.h>
85817+#include <linux/sched.h>
85818+#include <linux/fs.h>
85819+#include <linux/file.h>
85820+#include <linux/grinternal.h>
85821+
85822+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
85823+{
85824+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
85825+ const struct inode *link_inode = link->dentry->d_inode;
85826+
85827+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
85828+ /* ignore root-owned links, e.g. /proc/self */
85829+ gr_is_global_nonroot(link_inode->i_uid) && target &&
85830+ !uid_eq(link_inode->i_uid, target->i_uid)) {
85831+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
85832+ return 1;
85833+ }
85834+#endif
85835+ return 0;
85836+}
85837+
85838+int
85839+gr_handle_follow_link(const struct inode *parent,
85840+ const struct inode *inode,
85841+ const struct dentry *dentry, const struct vfsmount *mnt)
85842+{
85843+#ifdef CONFIG_GRKERNSEC_LINK
85844+ const struct cred *cred = current_cred();
85845+
85846+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
85847+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
85848+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
85849+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
85850+ return -EACCES;
85851+ }
85852+#endif
85853+ return 0;
85854+}
85855+
85856+int
85857+gr_handle_hardlink(const struct dentry *dentry,
85858+ const struct vfsmount *mnt,
85859+ struct inode *inode, const int mode, const struct filename *to)
85860+{
85861+#ifdef CONFIG_GRKERNSEC_LINK
85862+ const struct cred *cred = current_cred();
85863+
85864+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
85865+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
85866+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
85867+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
85868+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
85869+ return -EPERM;
85870+ }
85871+#endif
85872+ return 0;
85873+}
85874diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
85875new file mode 100644
85876index 0000000..dbe0a6b
85877--- /dev/null
85878+++ b/grsecurity/grsec_log.c
85879@@ -0,0 +1,341 @@
85880+#include <linux/kernel.h>
85881+#include <linux/sched.h>
85882+#include <linux/file.h>
85883+#include <linux/tty.h>
85884+#include <linux/fs.h>
85885+#include <linux/mm.h>
85886+#include <linux/grinternal.h>
85887+
85888+#ifdef CONFIG_TREE_PREEMPT_RCU
85889+#define DISABLE_PREEMPT() preempt_disable()
85890+#define ENABLE_PREEMPT() preempt_enable()
85891+#else
85892+#define DISABLE_PREEMPT()
85893+#define ENABLE_PREEMPT()
85894+#endif
85895+
85896+#define BEGIN_LOCKS(x) \
85897+ DISABLE_PREEMPT(); \
85898+ rcu_read_lock(); \
85899+ read_lock(&tasklist_lock); \
85900+ read_lock(&grsec_exec_file_lock); \
85901+ if (x != GR_DO_AUDIT) \
85902+ spin_lock(&grsec_alert_lock); \
85903+ else \
85904+ spin_lock(&grsec_audit_lock)
85905+
85906+#define END_LOCKS(x) \
85907+ if (x != GR_DO_AUDIT) \
85908+ spin_unlock(&grsec_alert_lock); \
85909+ else \
85910+ spin_unlock(&grsec_audit_lock); \
85911+ read_unlock(&grsec_exec_file_lock); \
85912+ read_unlock(&tasklist_lock); \
85913+ rcu_read_unlock(); \
85914+ ENABLE_PREEMPT(); \
85915+ if (x == GR_DONT_AUDIT) \
85916+ gr_handle_alertkill(current)
85917+
85918+enum {
85919+ FLOODING,
85920+ NO_FLOODING
85921+};
85922+
85923+extern char *gr_alert_log_fmt;
85924+extern char *gr_audit_log_fmt;
85925+extern char *gr_alert_log_buf;
85926+extern char *gr_audit_log_buf;
85927+
85928+static int gr_log_start(int audit)
85929+{
85930+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
85931+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
85932+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
85933+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
85934+ unsigned long curr_secs = get_seconds();
85935+
85936+ if (audit == GR_DO_AUDIT)
85937+ goto set_fmt;
85938+
85939+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
85940+ grsec_alert_wtime = curr_secs;
85941+ grsec_alert_fyet = 0;
85942+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
85943+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
85944+ grsec_alert_fyet++;
85945+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
85946+ grsec_alert_wtime = curr_secs;
85947+ grsec_alert_fyet++;
85948+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
85949+ return FLOODING;
85950+ }
85951+ else return FLOODING;
85952+
85953+set_fmt:
85954+#endif
85955+ memset(buf, 0, PAGE_SIZE);
85956+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
85957+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
85958+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
85959+ } else if (current->signal->curr_ip) {
85960+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
85961+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
85962+ } else if (gr_acl_is_enabled()) {
85963+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
85964+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
85965+ } else {
85966+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
85967+ strcpy(buf, fmt);
85968+ }
85969+
85970+ return NO_FLOODING;
85971+}
85972+
85973+static void gr_log_middle(int audit, const char *msg, va_list ap)
85974+ __attribute__ ((format (printf, 2, 0)));
85975+
85976+static void gr_log_middle(int audit, const char *msg, va_list ap)
85977+{
85978+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
85979+ unsigned int len = strlen(buf);
85980+
85981+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
85982+
85983+ return;
85984+}
85985+
85986+static void gr_log_middle_varargs(int audit, const char *msg, ...)
85987+ __attribute__ ((format (printf, 2, 3)));
85988+
85989+static void gr_log_middle_varargs(int audit, const char *msg, ...)
85990+{
85991+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
85992+ unsigned int len = strlen(buf);
85993+ va_list ap;
85994+
85995+ va_start(ap, msg);
85996+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
85997+ va_end(ap);
85998+
85999+ return;
86000+}
86001+
86002+static void gr_log_end(int audit, int append_default)
86003+{
86004+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
86005+ if (append_default) {
86006+ struct task_struct *task = current;
86007+ struct task_struct *parent = task->real_parent;
86008+ const struct cred *cred = __task_cred(task);
86009+ const struct cred *pcred = __task_cred(parent);
86010+ unsigned int len = strlen(buf);
86011+
86012+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
86013+ }
86014+
86015+ printk("%s\n", buf);
86016+
86017+ return;
86018+}
86019+
86020+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
86021+{
86022+ int logtype;
86023+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
86024+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
86025+ void *voidptr = NULL;
86026+ int num1 = 0, num2 = 0;
86027+ unsigned long ulong1 = 0, ulong2 = 0;
86028+ struct dentry *dentry = NULL;
86029+ struct vfsmount *mnt = NULL;
86030+ struct file *file = NULL;
86031+ struct task_struct *task = NULL;
86032+ struct vm_area_struct *vma = NULL;
86033+ const struct cred *cred, *pcred;
86034+ va_list ap;
86035+
86036+ BEGIN_LOCKS(audit);
86037+ logtype = gr_log_start(audit);
86038+ if (logtype == FLOODING) {
86039+ END_LOCKS(audit);
86040+ return;
86041+ }
86042+ va_start(ap, argtypes);
86043+ switch (argtypes) {
86044+ case GR_TTYSNIFF:
86045+ task = va_arg(ap, struct task_struct *);
86046+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
86047+ break;
86048+ case GR_SYSCTL_HIDDEN:
86049+ str1 = va_arg(ap, char *);
86050+ gr_log_middle_varargs(audit, msg, result, str1);
86051+ break;
86052+ case GR_RBAC:
86053+ dentry = va_arg(ap, struct dentry *);
86054+ mnt = va_arg(ap, struct vfsmount *);
86055+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
86056+ break;
86057+ case GR_RBAC_STR:
86058+ dentry = va_arg(ap, struct dentry *);
86059+ mnt = va_arg(ap, struct vfsmount *);
86060+ str1 = va_arg(ap, char *);
86061+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
86062+ break;
86063+ case GR_STR_RBAC:
86064+ str1 = va_arg(ap, char *);
86065+ dentry = va_arg(ap, struct dentry *);
86066+ mnt = va_arg(ap, struct vfsmount *);
86067+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
86068+ break;
86069+ case GR_RBAC_MODE2:
86070+ dentry = va_arg(ap, struct dentry *);
86071+ mnt = va_arg(ap, struct vfsmount *);
86072+ str1 = va_arg(ap, char *);
86073+ str2 = va_arg(ap, char *);
86074+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
86075+ break;
86076+ case GR_RBAC_MODE3:
86077+ dentry = va_arg(ap, struct dentry *);
86078+ mnt = va_arg(ap, struct vfsmount *);
86079+ str1 = va_arg(ap, char *);
86080+ str2 = va_arg(ap, char *);
86081+ str3 = va_arg(ap, char *);
86082+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
86083+ break;
86084+ case GR_FILENAME:
86085+ dentry = va_arg(ap, struct dentry *);
86086+ mnt = va_arg(ap, struct vfsmount *);
86087+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
86088+ break;
86089+ case GR_STR_FILENAME:
86090+ str1 = va_arg(ap, char *);
86091+ dentry = va_arg(ap, struct dentry *);
86092+ mnt = va_arg(ap, struct vfsmount *);
86093+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
86094+ break;
86095+ case GR_FILENAME_STR:
86096+ dentry = va_arg(ap, struct dentry *);
86097+ mnt = va_arg(ap, struct vfsmount *);
86098+ str1 = va_arg(ap, char *);
86099+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
86100+ break;
86101+ case GR_FILENAME_TWO_INT:
86102+ dentry = va_arg(ap, struct dentry *);
86103+ mnt = va_arg(ap, struct vfsmount *);
86104+ num1 = va_arg(ap, int);
86105+ num2 = va_arg(ap, int);
86106+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
86107+ break;
86108+ case GR_FILENAME_TWO_INT_STR:
86109+ dentry = va_arg(ap, struct dentry *);
86110+ mnt = va_arg(ap, struct vfsmount *);
86111+ num1 = va_arg(ap, int);
86112+ num2 = va_arg(ap, int);
86113+ str1 = va_arg(ap, char *);
86114+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
86115+ break;
86116+ case GR_TEXTREL:
86117+ file = va_arg(ap, struct file *);
86118+ ulong1 = va_arg(ap, unsigned long);
86119+ ulong2 = va_arg(ap, unsigned long);
86120+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
86121+ break;
86122+ case GR_PTRACE:
86123+ task = va_arg(ap, struct task_struct *);
86124+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
86125+ break;
86126+ case GR_RESOURCE:
86127+ task = va_arg(ap, struct task_struct *);
86128+ cred = __task_cred(task);
86129+ pcred = __task_cred(task->real_parent);
86130+ ulong1 = va_arg(ap, unsigned long);
86131+ str1 = va_arg(ap, char *);
86132+ ulong2 = va_arg(ap, unsigned long);
86133+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
86134+ break;
86135+ case GR_CAP:
86136+ task = va_arg(ap, struct task_struct *);
86137+ cred = __task_cred(task);
86138+ pcred = __task_cred(task->real_parent);
86139+ str1 = va_arg(ap, char *);
86140+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
86141+ break;
86142+ case GR_SIG:
86143+ str1 = va_arg(ap, char *);
86144+ voidptr = va_arg(ap, void *);
86145+ gr_log_middle_varargs(audit, msg, str1, voidptr);
86146+ break;
86147+ case GR_SIG2:
86148+ task = va_arg(ap, struct task_struct *);
86149+ cred = __task_cred(task);
86150+ pcred = __task_cred(task->real_parent);
86151+ num1 = va_arg(ap, int);
86152+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
86153+ break;
86154+ case GR_CRASH1:
86155+ task = va_arg(ap, struct task_struct *);
86156+ cred = __task_cred(task);
86157+ pcred = __task_cred(task->real_parent);
86158+ ulong1 = va_arg(ap, unsigned long);
86159+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
86160+ break;
86161+ case GR_CRASH2:
86162+ task = va_arg(ap, struct task_struct *);
86163+ cred = __task_cred(task);
86164+ pcred = __task_cred(task->real_parent);
86165+ ulong1 = va_arg(ap, unsigned long);
86166+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
86167+ break;
86168+ case GR_RWXMAP:
86169+ file = va_arg(ap, struct file *);
86170+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
86171+ break;
86172+ case GR_RWXMAPVMA:
86173+ vma = va_arg(ap, struct vm_area_struct *);
86174+ if (vma->vm_file)
86175+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
86176+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
86177+ str1 = "<stack>";
86178+ else if (vma->vm_start <= current->mm->brk &&
86179+ vma->vm_end >= current->mm->start_brk)
86180+ str1 = "<heap>";
86181+ else
86182+ str1 = "<anonymous mapping>";
86183+ gr_log_middle_varargs(audit, msg, str1);
86184+ break;
86185+ case GR_PSACCT:
86186+ {
86187+ unsigned int wday, cday;
86188+ __u8 whr, chr;
86189+ __u8 wmin, cmin;
86190+ __u8 wsec, csec;
86191+ char cur_tty[64] = { 0 };
86192+ char parent_tty[64] = { 0 };
86193+
86194+ task = va_arg(ap, struct task_struct *);
86195+ wday = va_arg(ap, unsigned int);
86196+ cday = va_arg(ap, unsigned int);
86197+ whr = va_arg(ap, int);
86198+ chr = va_arg(ap, int);
86199+ wmin = va_arg(ap, int);
86200+ cmin = va_arg(ap, int);
86201+ wsec = va_arg(ap, int);
86202+ csec = va_arg(ap, int);
86203+ ulong1 = va_arg(ap, unsigned long);
86204+ cred = __task_cred(task);
86205+ pcred = __task_cred(task->real_parent);
86206+
86207+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
86208+ }
86209+ break;
86210+ default:
86211+ gr_log_middle(audit, msg, ap);
86212+ }
86213+ va_end(ap);
86214+ // these don't need DEFAULTSECARGS printed on the end
86215+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
86216+ gr_log_end(audit, 0);
86217+ else
86218+ gr_log_end(audit, 1);
86219+ END_LOCKS(audit);
86220+}
86221diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
86222new file mode 100644
86223index 0000000..0e39d8c
86224--- /dev/null
86225+++ b/grsecurity/grsec_mem.c
86226@@ -0,0 +1,48 @@
86227+#include <linux/kernel.h>
86228+#include <linux/sched.h>
86229+#include <linux/mm.h>
86230+#include <linux/mman.h>
86231+#include <linux/module.h>
86232+#include <linux/grinternal.h>
86233+
86234+void gr_handle_msr_write(void)
86235+{
86236+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
86237+ return;
86238+}
86239+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
86240+
86241+void
86242+gr_handle_ioperm(void)
86243+{
86244+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
86245+ return;
86246+}
86247+
86248+void
86249+gr_handle_iopl(void)
86250+{
86251+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
86252+ return;
86253+}
86254+
86255+void
86256+gr_handle_mem_readwrite(u64 from, u64 to)
86257+{
86258+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
86259+ return;
86260+}
86261+
86262+void
86263+gr_handle_vm86(void)
86264+{
86265+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
86266+ return;
86267+}
86268+
86269+void
86270+gr_log_badprocpid(const char *entry)
86271+{
86272+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
86273+ return;
86274+}
86275diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
86276new file mode 100644
86277index 0000000..6f9eb73
86278--- /dev/null
86279+++ b/grsecurity/grsec_mount.c
86280@@ -0,0 +1,65 @@
86281+#include <linux/kernel.h>
86282+#include <linux/sched.h>
86283+#include <linux/mount.h>
86284+#include <linux/major.h>
86285+#include <linux/grsecurity.h>
86286+#include <linux/grinternal.h>
86287+
86288+void
86289+gr_log_remount(const char *devname, const int retval)
86290+{
86291+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
86292+ if (grsec_enable_mount && (retval >= 0))
86293+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
86294+#endif
86295+ return;
86296+}
86297+
86298+void
86299+gr_log_unmount(const char *devname, const int retval)
86300+{
86301+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
86302+ if (grsec_enable_mount && (retval >= 0))
86303+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
86304+#endif
86305+ return;
86306+}
86307+
86308+void
86309+gr_log_mount(const char *from, struct path *to, const int retval)
86310+{
86311+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
86312+ if (grsec_enable_mount && (retval >= 0))
86313+ gr_log_str_fs(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to->dentry, to->mnt);
86314+#endif
86315+ return;
86316+}
86317+
86318+int
86319+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
86320+{
86321+#ifdef CONFIG_GRKERNSEC_ROFS
86322+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
86323+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
86324+ return -EPERM;
86325+ } else
86326+ return 0;
86327+#endif
86328+ return 0;
86329+}
86330+
86331+int
86332+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
86333+{
86334+#ifdef CONFIG_GRKERNSEC_ROFS
86335+ struct inode *inode = dentry->d_inode;
86336+
86337+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
86338+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
86339+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
86340+ return -EPERM;
86341+ } else
86342+ return 0;
86343+#endif
86344+ return 0;
86345+}
86346diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
86347new file mode 100644
86348index 0000000..6ee9d50
86349--- /dev/null
86350+++ b/grsecurity/grsec_pax.c
86351@@ -0,0 +1,45 @@
86352+#include <linux/kernel.h>
86353+#include <linux/sched.h>
86354+#include <linux/mm.h>
86355+#include <linux/file.h>
86356+#include <linux/grinternal.h>
86357+#include <linux/grsecurity.h>
86358+
86359+void
86360+gr_log_textrel(struct vm_area_struct * vma)
86361+{
86362+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
86363+ if (grsec_enable_log_rwxmaps)
86364+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
86365+#endif
86366+ return;
86367+}
86368+
86369+void gr_log_ptgnustack(struct file *file)
86370+{
86371+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
86372+ if (grsec_enable_log_rwxmaps)
86373+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
86374+#endif
86375+ return;
86376+}
86377+
86378+void
86379+gr_log_rwxmmap(struct file *file)
86380+{
86381+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
86382+ if (grsec_enable_log_rwxmaps)
86383+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
86384+#endif
86385+ return;
86386+}
86387+
86388+void
86389+gr_log_rwxmprotect(struct vm_area_struct *vma)
86390+{
86391+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
86392+ if (grsec_enable_log_rwxmaps)
86393+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
86394+#endif
86395+ return;
86396+}
86397diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
86398new file mode 100644
86399index 0000000..2005a3a
86400--- /dev/null
86401+++ b/grsecurity/grsec_proc.c
86402@@ -0,0 +1,20 @@
86403+#include <linux/kernel.h>
86404+#include <linux/sched.h>
86405+#include <linux/grsecurity.h>
86406+#include <linux/grinternal.h>
86407+
86408+int gr_proc_is_restricted(void)
86409+{
86410+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
86411+ const struct cred *cred = current_cred();
86412+#endif
86413+
86414+#ifdef CONFIG_GRKERNSEC_PROC_USER
86415+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
86416+ return -EACCES;
86417+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
86418+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
86419+ return -EACCES;
86420+#endif
86421+ return 0;
86422+}
86423diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
86424new file mode 100644
86425index 0000000..f7f29aa
86426--- /dev/null
86427+++ b/grsecurity/grsec_ptrace.c
86428@@ -0,0 +1,30 @@
86429+#include <linux/kernel.h>
86430+#include <linux/sched.h>
86431+#include <linux/grinternal.h>
86432+#include <linux/security.h>
86433+
86434+void
86435+gr_audit_ptrace(struct task_struct *task)
86436+{
86437+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
86438+ if (grsec_enable_audit_ptrace)
86439+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
86440+#endif
86441+ return;
86442+}
86443+
86444+int
86445+gr_ptrace_readexec(struct file *file, int unsafe_flags)
86446+{
86447+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
86448+ const struct dentry *dentry = file->f_path.dentry;
86449+ const struct vfsmount *mnt = file->f_path.mnt;
86450+
86451+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
86452+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
86453+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
86454+ return -EACCES;
86455+ }
86456+#endif
86457+ return 0;
86458+}
86459diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
86460new file mode 100644
86461index 0000000..3860c7e
86462--- /dev/null
86463+++ b/grsecurity/grsec_sig.c
86464@@ -0,0 +1,236 @@
86465+#include <linux/kernel.h>
86466+#include <linux/sched.h>
86467+#include <linux/fs.h>
86468+#include <linux/delay.h>
86469+#include <linux/grsecurity.h>
86470+#include <linux/grinternal.h>
86471+#include <linux/hardirq.h>
86472+
86473+char *signames[] = {
86474+ [SIGSEGV] = "Segmentation fault",
86475+ [SIGILL] = "Illegal instruction",
86476+ [SIGABRT] = "Abort",
86477+ [SIGBUS] = "Invalid alignment/Bus error"
86478+};
86479+
86480+void
86481+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
86482+{
86483+#ifdef CONFIG_GRKERNSEC_SIGNAL
86484+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
86485+ (sig == SIGABRT) || (sig == SIGBUS))) {
86486+ if (task_pid_nr(t) == task_pid_nr(current)) {
86487+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
86488+ } else {
86489+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
86490+ }
86491+ }
86492+#endif
86493+ return;
86494+}
86495+
86496+int
86497+gr_handle_signal(const struct task_struct *p, const int sig)
86498+{
86499+#ifdef CONFIG_GRKERNSEC
86500+ /* ignore the 0 signal for protected task checks */
86501+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
86502+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
86503+ return -EPERM;
86504+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
86505+ return -EPERM;
86506+ }
86507+#endif
86508+ return 0;
86509+}
86510+
86511+#ifdef CONFIG_GRKERNSEC
86512+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
86513+
86514+int gr_fake_force_sig(int sig, struct task_struct *t)
86515+{
86516+ unsigned long int flags;
86517+ int ret, blocked, ignored;
86518+ struct k_sigaction *action;
86519+
86520+ spin_lock_irqsave(&t->sighand->siglock, flags);
86521+ action = &t->sighand->action[sig-1];
86522+ ignored = action->sa.sa_handler == SIG_IGN;
86523+ blocked = sigismember(&t->blocked, sig);
86524+ if (blocked || ignored) {
86525+ action->sa.sa_handler = SIG_DFL;
86526+ if (blocked) {
86527+ sigdelset(&t->blocked, sig);
86528+ recalc_sigpending_and_wake(t);
86529+ }
86530+ }
86531+ if (action->sa.sa_handler == SIG_DFL)
86532+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
86533+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
86534+
86535+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
86536+
86537+ return ret;
86538+}
86539+#endif
86540+
86541+#define GR_USER_BAN_TIME (15 * 60)
86542+#define GR_DAEMON_BRUTE_TIME (30 * 60)
86543+
86544+void gr_handle_brute_attach(int dumpable)
86545+{
86546+#ifdef CONFIG_GRKERNSEC_BRUTE
86547+ struct task_struct *p = current;
86548+ kuid_t uid = GLOBAL_ROOT_UID;
86549+ int daemon = 0;
86550+
86551+ if (!grsec_enable_brute)
86552+ return;
86553+
86554+ rcu_read_lock();
86555+ read_lock(&tasklist_lock);
86556+ read_lock(&grsec_exec_file_lock);
86557+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
86558+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
86559+ p->real_parent->brute = 1;
86560+ daemon = 1;
86561+ } else {
86562+ const struct cred *cred = __task_cred(p), *cred2;
86563+ struct task_struct *tsk, *tsk2;
86564+
86565+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
86566+ struct user_struct *user;
86567+
86568+ uid = cred->uid;
86569+
86570+ /* this is put upon execution past expiration */
86571+ user = find_user(uid);
86572+ if (user == NULL)
86573+ goto unlock;
86574+ user->suid_banned = 1;
86575+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
86576+ if (user->suid_ban_expires == ~0UL)
86577+ user->suid_ban_expires--;
86578+
86579+ /* only kill other threads of the same binary, from the same user */
86580+ do_each_thread(tsk2, tsk) {
86581+ cred2 = __task_cred(tsk);
86582+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
86583+ gr_fake_force_sig(SIGKILL, tsk);
86584+ } while_each_thread(tsk2, tsk);
86585+ }
86586+ }
86587+unlock:
86588+ read_unlock(&grsec_exec_file_lock);
86589+ read_unlock(&tasklist_lock);
86590+ rcu_read_unlock();
86591+
86592+ if (gr_is_global_nonroot(uid))
86593+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
86594+ else if (daemon)
86595+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
86596+
86597+#endif
86598+ return;
86599+}
86600+
86601+void gr_handle_brute_check(void)
86602+{
86603+#ifdef CONFIG_GRKERNSEC_BRUTE
86604+ struct task_struct *p = current;
86605+
86606+ if (unlikely(p->brute)) {
86607+ if (!grsec_enable_brute)
86608+ p->brute = 0;
86609+ else if (time_before(get_seconds(), p->brute_expires))
86610+ msleep(30 * 1000);
86611+ }
86612+#endif
86613+ return;
86614+}
86615+
86616+void gr_handle_kernel_exploit(void)
86617+{
86618+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
86619+ const struct cred *cred;
86620+ struct task_struct *tsk, *tsk2;
86621+ struct user_struct *user;
86622+ kuid_t uid;
86623+
86624+ if (in_irq() || in_serving_softirq() || in_nmi())
86625+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
86626+
86627+ uid = current_uid();
86628+
86629+ if (gr_is_global_root(uid))
86630+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
86631+ else {
86632+ /* kill all the processes of this user, hold a reference
86633+ to their creds struct, and prevent them from creating
86634+ another process until system reset
86635+ */
86636+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
86637+ GR_GLOBAL_UID(uid));
86638+ /* we intentionally leak this ref */
86639+ user = get_uid(current->cred->user);
86640+ if (user)
86641+ user->kernel_banned = 1;
86642+
86643+ /* kill all processes of this user */
86644+ read_lock(&tasklist_lock);
86645+ do_each_thread(tsk2, tsk) {
86646+ cred = __task_cred(tsk);
86647+ if (uid_eq(cred->uid, uid))
86648+ gr_fake_force_sig(SIGKILL, tsk);
86649+ } while_each_thread(tsk2, tsk);
86650+ read_unlock(&tasklist_lock);
86651+ }
86652+#endif
86653+}
86654+
86655+#ifdef CONFIG_GRKERNSEC_BRUTE
86656+static bool suid_ban_expired(struct user_struct *user)
86657+{
86658+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
86659+ user->suid_banned = 0;
86660+ user->suid_ban_expires = 0;
86661+ free_uid(user);
86662+ return true;
86663+ }
86664+
86665+ return false;
86666+}
86667+#endif
86668+
86669+int gr_process_kernel_exec_ban(void)
86670+{
86671+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
86672+ if (unlikely(current->cred->user->kernel_banned))
86673+ return -EPERM;
86674+#endif
86675+ return 0;
86676+}
86677+
86678+int gr_process_kernel_setuid_ban(struct user_struct *user)
86679+{
86680+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
86681+ if (unlikely(user->kernel_banned))
86682+ gr_fake_force_sig(SIGKILL, current);
86683+#endif
86684+ return 0;
86685+}
86686+
86687+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
86688+{
86689+#ifdef CONFIG_GRKERNSEC_BRUTE
86690+ struct user_struct *user = current->cred->user;
86691+ if (unlikely(user->suid_banned)) {
86692+ if (suid_ban_expired(user))
86693+ return 0;
86694+ /* disallow execution of suid binaries only */
86695+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
86696+ return -EPERM;
86697+ }
86698+#endif
86699+ return 0;
86700+}
86701diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
86702new file mode 100644
86703index 0000000..a523bd2
86704--- /dev/null
86705+++ b/grsecurity/grsec_sock.c
86706@@ -0,0 +1,244 @@
86707+#include <linux/kernel.h>
86708+#include <linux/module.h>
86709+#include <linux/sched.h>
86710+#include <linux/file.h>
86711+#include <linux/net.h>
86712+#include <linux/in.h>
86713+#include <linux/ip.h>
86714+#include <net/sock.h>
86715+#include <net/inet_sock.h>
86716+#include <linux/grsecurity.h>
86717+#include <linux/grinternal.h>
86718+#include <linux/gracl.h>
86719+
86720+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
86721+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
86722+
86723+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
86724+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
86725+
86726+#ifdef CONFIG_UNIX_MODULE
86727+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
86728+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
86729+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
86730+EXPORT_SYMBOL_GPL(gr_handle_create);
86731+#endif
86732+
86733+#ifdef CONFIG_GRKERNSEC
86734+#define gr_conn_table_size 32749
86735+struct conn_table_entry {
86736+ struct conn_table_entry *next;
86737+ struct signal_struct *sig;
86738+};
86739+
86740+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
86741+DEFINE_SPINLOCK(gr_conn_table_lock);
86742+
86743+extern const char * gr_socktype_to_name(unsigned char type);
86744+extern const char * gr_proto_to_name(unsigned char proto);
86745+extern const char * gr_sockfamily_to_name(unsigned char family);
86746+
86747+static int
86748+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
86749+{
86750+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
86751+}
86752+
86753+static int
86754+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
86755+ __u16 sport, __u16 dport)
86756+{
86757+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
86758+ sig->gr_sport == sport && sig->gr_dport == dport))
86759+ return 1;
86760+ else
86761+ return 0;
86762+}
86763+
86764+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
86765+{
86766+ struct conn_table_entry **match;
86767+ unsigned int index;
86768+
86769+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
86770+ sig->gr_sport, sig->gr_dport,
86771+ gr_conn_table_size);
86772+
86773+ newent->sig = sig;
86774+
86775+ match = &gr_conn_table[index];
86776+ newent->next = *match;
86777+ *match = newent;
86778+
86779+ return;
86780+}
86781+
86782+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
86783+{
86784+ struct conn_table_entry *match, *last = NULL;
86785+ unsigned int index;
86786+
86787+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
86788+ sig->gr_sport, sig->gr_dport,
86789+ gr_conn_table_size);
86790+
86791+ match = gr_conn_table[index];
86792+ while (match && !conn_match(match->sig,
86793+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
86794+ sig->gr_dport)) {
86795+ last = match;
86796+ match = match->next;
86797+ }
86798+
86799+ if (match) {
86800+ if (last)
86801+ last->next = match->next;
86802+ else
86803+ gr_conn_table[index] = NULL;
86804+ kfree(match);
86805+ }
86806+
86807+ return;
86808+}
86809+
86810+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
86811+ __u16 sport, __u16 dport)
86812+{
86813+ struct conn_table_entry *match;
86814+ unsigned int index;
86815+
86816+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
86817+
86818+ match = gr_conn_table[index];
86819+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
86820+ match = match->next;
86821+
86822+ if (match)
86823+ return match->sig;
86824+ else
86825+ return NULL;
86826+}
86827+
86828+#endif
86829+
86830+void gr_update_task_in_ip_table(const struct inet_sock *inet)
86831+{
86832+#ifdef CONFIG_GRKERNSEC
86833+ struct signal_struct *sig = current->signal;
86834+ struct conn_table_entry *newent;
86835+
86836+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
86837+ if (newent == NULL)
86838+ return;
86839+ /* no bh lock needed since we are called with bh disabled */
86840+ spin_lock(&gr_conn_table_lock);
86841+ gr_del_task_from_ip_table_nolock(sig);
86842+ sig->gr_saddr = inet->inet_rcv_saddr;
86843+ sig->gr_daddr = inet->inet_daddr;
86844+ sig->gr_sport = inet->inet_sport;
86845+ sig->gr_dport = inet->inet_dport;
86846+ gr_add_to_task_ip_table_nolock(sig, newent);
86847+ spin_unlock(&gr_conn_table_lock);
86848+#endif
86849+ return;
86850+}
86851+
86852+void gr_del_task_from_ip_table(struct task_struct *task)
86853+{
86854+#ifdef CONFIG_GRKERNSEC
86855+ spin_lock_bh(&gr_conn_table_lock);
86856+ gr_del_task_from_ip_table_nolock(task->signal);
86857+ spin_unlock_bh(&gr_conn_table_lock);
86858+#endif
86859+ return;
86860+}
86861+
86862+void
86863+gr_attach_curr_ip(const struct sock *sk)
86864+{
86865+#ifdef CONFIG_GRKERNSEC
86866+ struct signal_struct *p, *set;
86867+ const struct inet_sock *inet = inet_sk(sk);
86868+
86869+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
86870+ return;
86871+
86872+ set = current->signal;
86873+
86874+ spin_lock_bh(&gr_conn_table_lock);
86875+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
86876+ inet->inet_dport, inet->inet_sport);
86877+ if (unlikely(p != NULL)) {
86878+ set->curr_ip = p->curr_ip;
86879+ set->used_accept = 1;
86880+ gr_del_task_from_ip_table_nolock(p);
86881+ spin_unlock_bh(&gr_conn_table_lock);
86882+ return;
86883+ }
86884+ spin_unlock_bh(&gr_conn_table_lock);
86885+
86886+ set->curr_ip = inet->inet_daddr;
86887+ set->used_accept = 1;
86888+#endif
86889+ return;
86890+}
86891+
86892+int
86893+gr_handle_sock_all(const int family, const int type, const int protocol)
86894+{
86895+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
86896+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
86897+ (family != AF_UNIX)) {
86898+ if (family == AF_INET)
86899+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
86900+ else
86901+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
86902+ return -EACCES;
86903+ }
86904+#endif
86905+ return 0;
86906+}
86907+
86908+int
86909+gr_handle_sock_server(const struct sockaddr *sck)
86910+{
86911+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
86912+ if (grsec_enable_socket_server &&
86913+ in_group_p(grsec_socket_server_gid) &&
86914+ sck && (sck->sa_family != AF_UNIX) &&
86915+ (sck->sa_family != AF_LOCAL)) {
86916+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
86917+ return -EACCES;
86918+ }
86919+#endif
86920+ return 0;
86921+}
86922+
86923+int
86924+gr_handle_sock_server_other(const struct sock *sck)
86925+{
86926+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
86927+ if (grsec_enable_socket_server &&
86928+ in_group_p(grsec_socket_server_gid) &&
86929+ sck && (sck->sk_family != AF_UNIX) &&
86930+ (sck->sk_family != AF_LOCAL)) {
86931+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
86932+ return -EACCES;
86933+ }
86934+#endif
86935+ return 0;
86936+}
86937+
86938+int
86939+gr_handle_sock_client(const struct sockaddr *sck)
86940+{
86941+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
86942+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
86943+ sck && (sck->sa_family != AF_UNIX) &&
86944+ (sck->sa_family != AF_LOCAL)) {
86945+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
86946+ return -EACCES;
86947+ }
86948+#endif
86949+ return 0;
86950+}
86951diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
86952new file mode 100644
86953index 0000000..cce889e
86954--- /dev/null
86955+++ b/grsecurity/grsec_sysctl.c
86956@@ -0,0 +1,488 @@
86957+#include <linux/kernel.h>
86958+#include <linux/sched.h>
86959+#include <linux/sysctl.h>
86960+#include <linux/grsecurity.h>
86961+#include <linux/grinternal.h>
86962+
86963+int
86964+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
86965+{
86966+#ifdef CONFIG_GRKERNSEC_SYSCTL
86967+ if (dirname == NULL || name == NULL)
86968+ return 0;
86969+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
86970+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
86971+ return -EACCES;
86972+ }
86973+#endif
86974+ return 0;
86975+}
86976+
86977+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
86978+static int __maybe_unused __read_only one = 1;
86979+#endif
86980+
86981+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
86982+ defined(CONFIG_GRKERNSEC_DENYUSB)
86983+struct ctl_table grsecurity_table[] = {
86984+#ifdef CONFIG_GRKERNSEC_SYSCTL
86985+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
86986+#ifdef CONFIG_GRKERNSEC_IO
86987+ {
86988+ .procname = "disable_priv_io",
86989+ .data = &grsec_disable_privio,
86990+ .maxlen = sizeof(int),
86991+ .mode = 0600,
86992+ .proc_handler = &proc_dointvec,
86993+ },
86994+#endif
86995+#endif
86996+#ifdef CONFIG_GRKERNSEC_LINK
86997+ {
86998+ .procname = "linking_restrictions",
86999+ .data = &grsec_enable_link,
87000+ .maxlen = sizeof(int),
87001+ .mode = 0600,
87002+ .proc_handler = &proc_dointvec,
87003+ },
87004+#endif
87005+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
87006+ {
87007+ .procname = "enforce_symlinksifowner",
87008+ .data = &grsec_enable_symlinkown,
87009+ .maxlen = sizeof(int),
87010+ .mode = 0600,
87011+ .proc_handler = &proc_dointvec,
87012+ },
87013+ {
87014+ .procname = "symlinkown_gid",
87015+ .data = &grsec_symlinkown_gid,
87016+ .maxlen = sizeof(int),
87017+ .mode = 0600,
87018+ .proc_handler = &proc_dointvec,
87019+ },
87020+#endif
87021+#ifdef CONFIG_GRKERNSEC_BRUTE
87022+ {
87023+ .procname = "deter_bruteforce",
87024+ .data = &grsec_enable_brute,
87025+ .maxlen = sizeof(int),
87026+ .mode = 0600,
87027+ .proc_handler = &proc_dointvec,
87028+ },
87029+#endif
87030+#ifdef CONFIG_GRKERNSEC_FIFO
87031+ {
87032+ .procname = "fifo_restrictions",
87033+ .data = &grsec_enable_fifo,
87034+ .maxlen = sizeof(int),
87035+ .mode = 0600,
87036+ .proc_handler = &proc_dointvec,
87037+ },
87038+#endif
87039+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
87040+ {
87041+ .procname = "ptrace_readexec",
87042+ .data = &grsec_enable_ptrace_readexec,
87043+ .maxlen = sizeof(int),
87044+ .mode = 0600,
87045+ .proc_handler = &proc_dointvec,
87046+ },
87047+#endif
87048+#ifdef CONFIG_GRKERNSEC_SETXID
87049+ {
87050+ .procname = "consistent_setxid",
87051+ .data = &grsec_enable_setxid,
87052+ .maxlen = sizeof(int),
87053+ .mode = 0600,
87054+ .proc_handler = &proc_dointvec,
87055+ },
87056+#endif
87057+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
87058+ {
87059+ .procname = "ip_blackhole",
87060+ .data = &grsec_enable_blackhole,
87061+ .maxlen = sizeof(int),
87062+ .mode = 0600,
87063+ .proc_handler = &proc_dointvec,
87064+ },
87065+ {
87066+ .procname = "lastack_retries",
87067+ .data = &grsec_lastack_retries,
87068+ .maxlen = sizeof(int),
87069+ .mode = 0600,
87070+ .proc_handler = &proc_dointvec,
87071+ },
87072+#endif
87073+#ifdef CONFIG_GRKERNSEC_EXECLOG
87074+ {
87075+ .procname = "exec_logging",
87076+ .data = &grsec_enable_execlog,
87077+ .maxlen = sizeof(int),
87078+ .mode = 0600,
87079+ .proc_handler = &proc_dointvec,
87080+ },
87081+#endif
87082+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
87083+ {
87084+ .procname = "rwxmap_logging",
87085+ .data = &grsec_enable_log_rwxmaps,
87086+ .maxlen = sizeof(int),
87087+ .mode = 0600,
87088+ .proc_handler = &proc_dointvec,
87089+ },
87090+#endif
87091+#ifdef CONFIG_GRKERNSEC_SIGNAL
87092+ {
87093+ .procname = "signal_logging",
87094+ .data = &grsec_enable_signal,
87095+ .maxlen = sizeof(int),
87096+ .mode = 0600,
87097+ .proc_handler = &proc_dointvec,
87098+ },
87099+#endif
87100+#ifdef CONFIG_GRKERNSEC_FORKFAIL
87101+ {
87102+ .procname = "forkfail_logging",
87103+ .data = &grsec_enable_forkfail,
87104+ .maxlen = sizeof(int),
87105+ .mode = 0600,
87106+ .proc_handler = &proc_dointvec,
87107+ },
87108+#endif
87109+#ifdef CONFIG_GRKERNSEC_TIME
87110+ {
87111+ .procname = "timechange_logging",
87112+ .data = &grsec_enable_time,
87113+ .maxlen = sizeof(int),
87114+ .mode = 0600,
87115+ .proc_handler = &proc_dointvec,
87116+ },
87117+#endif
87118+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
87119+ {
87120+ .procname = "chroot_deny_shmat",
87121+ .data = &grsec_enable_chroot_shmat,
87122+ .maxlen = sizeof(int),
87123+ .mode = 0600,
87124+ .proc_handler = &proc_dointvec,
87125+ },
87126+#endif
87127+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
87128+ {
87129+ .procname = "chroot_deny_unix",
87130+ .data = &grsec_enable_chroot_unix,
87131+ .maxlen = sizeof(int),
87132+ .mode = 0600,
87133+ .proc_handler = &proc_dointvec,
87134+ },
87135+#endif
87136+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
87137+ {
87138+ .procname = "chroot_deny_mount",
87139+ .data = &grsec_enable_chroot_mount,
87140+ .maxlen = sizeof(int),
87141+ .mode = 0600,
87142+ .proc_handler = &proc_dointvec,
87143+ },
87144+#endif
87145+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
87146+ {
87147+ .procname = "chroot_deny_fchdir",
87148+ .data = &grsec_enable_chroot_fchdir,
87149+ .maxlen = sizeof(int),
87150+ .mode = 0600,
87151+ .proc_handler = &proc_dointvec,
87152+ },
87153+#endif
87154+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
87155+ {
87156+ .procname = "chroot_deny_chroot",
87157+ .data = &grsec_enable_chroot_double,
87158+ .maxlen = sizeof(int),
87159+ .mode = 0600,
87160+ .proc_handler = &proc_dointvec,
87161+ },
87162+#endif
87163+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
87164+ {
87165+ .procname = "chroot_deny_pivot",
87166+ .data = &grsec_enable_chroot_pivot,
87167+ .maxlen = sizeof(int),
87168+ .mode = 0600,
87169+ .proc_handler = &proc_dointvec,
87170+ },
87171+#endif
87172+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
87173+ {
87174+ .procname = "chroot_enforce_chdir",
87175+ .data = &grsec_enable_chroot_chdir,
87176+ .maxlen = sizeof(int),
87177+ .mode = 0600,
87178+ .proc_handler = &proc_dointvec,
87179+ },
87180+#endif
87181+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
87182+ {
87183+ .procname = "chroot_deny_chmod",
87184+ .data = &grsec_enable_chroot_chmod,
87185+ .maxlen = sizeof(int),
87186+ .mode = 0600,
87187+ .proc_handler = &proc_dointvec,
87188+ },
87189+#endif
87190+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
87191+ {
87192+ .procname = "chroot_deny_mknod",
87193+ .data = &grsec_enable_chroot_mknod,
87194+ .maxlen = sizeof(int),
87195+ .mode = 0600,
87196+ .proc_handler = &proc_dointvec,
87197+ },
87198+#endif
87199+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
87200+ {
87201+ .procname = "chroot_restrict_nice",
87202+ .data = &grsec_enable_chroot_nice,
87203+ .maxlen = sizeof(int),
87204+ .mode = 0600,
87205+ .proc_handler = &proc_dointvec,
87206+ },
87207+#endif
87208+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
87209+ {
87210+ .procname = "chroot_execlog",
87211+ .data = &grsec_enable_chroot_execlog,
87212+ .maxlen = sizeof(int),
87213+ .mode = 0600,
87214+ .proc_handler = &proc_dointvec,
87215+ },
87216+#endif
87217+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
87218+ {
87219+ .procname = "chroot_caps",
87220+ .data = &grsec_enable_chroot_caps,
87221+ .maxlen = sizeof(int),
87222+ .mode = 0600,
87223+ .proc_handler = &proc_dointvec,
87224+ },
87225+#endif
87226+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
87227+ {
87228+ .procname = "chroot_deny_bad_rename",
87229+ .data = &grsec_enable_chroot_rename,
87230+ .maxlen = sizeof(int),
87231+ .mode = 0600,
87232+ .proc_handler = &proc_dointvec,
87233+ },
87234+#endif
87235+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
87236+ {
87237+ .procname = "chroot_deny_sysctl",
87238+ .data = &grsec_enable_chroot_sysctl,
87239+ .maxlen = sizeof(int),
87240+ .mode = 0600,
87241+ .proc_handler = &proc_dointvec,
87242+ },
87243+#endif
87244+#ifdef CONFIG_GRKERNSEC_TPE
87245+ {
87246+ .procname = "tpe",
87247+ .data = &grsec_enable_tpe,
87248+ .maxlen = sizeof(int),
87249+ .mode = 0600,
87250+ .proc_handler = &proc_dointvec,
87251+ },
87252+ {
87253+ .procname = "tpe_gid",
87254+ .data = &grsec_tpe_gid,
87255+ .maxlen = sizeof(int),
87256+ .mode = 0600,
87257+ .proc_handler = &proc_dointvec,
87258+ },
87259+#endif
87260+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
87261+ {
87262+ .procname = "tpe_invert",
87263+ .data = &grsec_enable_tpe_invert,
87264+ .maxlen = sizeof(int),
87265+ .mode = 0600,
87266+ .proc_handler = &proc_dointvec,
87267+ },
87268+#endif
87269+#ifdef CONFIG_GRKERNSEC_TPE_ALL
87270+ {
87271+ .procname = "tpe_restrict_all",
87272+ .data = &grsec_enable_tpe_all,
87273+ .maxlen = sizeof(int),
87274+ .mode = 0600,
87275+ .proc_handler = &proc_dointvec,
87276+ },
87277+#endif
87278+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
87279+ {
87280+ .procname = "socket_all",
87281+ .data = &grsec_enable_socket_all,
87282+ .maxlen = sizeof(int),
87283+ .mode = 0600,
87284+ .proc_handler = &proc_dointvec,
87285+ },
87286+ {
87287+ .procname = "socket_all_gid",
87288+ .data = &grsec_socket_all_gid,
87289+ .maxlen = sizeof(int),
87290+ .mode = 0600,
87291+ .proc_handler = &proc_dointvec,
87292+ },
87293+#endif
87294+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
87295+ {
87296+ .procname = "socket_client",
87297+ .data = &grsec_enable_socket_client,
87298+ .maxlen = sizeof(int),
87299+ .mode = 0600,
87300+ .proc_handler = &proc_dointvec,
87301+ },
87302+ {
87303+ .procname = "socket_client_gid",
87304+ .data = &grsec_socket_client_gid,
87305+ .maxlen = sizeof(int),
87306+ .mode = 0600,
87307+ .proc_handler = &proc_dointvec,
87308+ },
87309+#endif
87310+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
87311+ {
87312+ .procname = "socket_server",
87313+ .data = &grsec_enable_socket_server,
87314+ .maxlen = sizeof(int),
87315+ .mode = 0600,
87316+ .proc_handler = &proc_dointvec,
87317+ },
87318+ {
87319+ .procname = "socket_server_gid",
87320+ .data = &grsec_socket_server_gid,
87321+ .maxlen = sizeof(int),
87322+ .mode = 0600,
87323+ .proc_handler = &proc_dointvec,
87324+ },
87325+#endif
87326+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
87327+ {
87328+ .procname = "audit_group",
87329+ .data = &grsec_enable_group,
87330+ .maxlen = sizeof(int),
87331+ .mode = 0600,
87332+ .proc_handler = &proc_dointvec,
87333+ },
87334+ {
87335+ .procname = "audit_gid",
87336+ .data = &grsec_audit_gid,
87337+ .maxlen = sizeof(int),
87338+ .mode = 0600,
87339+ .proc_handler = &proc_dointvec,
87340+ },
87341+#endif
87342+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
87343+ {
87344+ .procname = "audit_chdir",
87345+ .data = &grsec_enable_chdir,
87346+ .maxlen = sizeof(int),
87347+ .mode = 0600,
87348+ .proc_handler = &proc_dointvec,
87349+ },
87350+#endif
87351+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
87352+ {
87353+ .procname = "audit_mount",
87354+ .data = &grsec_enable_mount,
87355+ .maxlen = sizeof(int),
87356+ .mode = 0600,
87357+ .proc_handler = &proc_dointvec,
87358+ },
87359+#endif
87360+#ifdef CONFIG_GRKERNSEC_DMESG
87361+ {
87362+ .procname = "dmesg",
87363+ .data = &grsec_enable_dmesg,
87364+ .maxlen = sizeof(int),
87365+ .mode = 0600,
87366+ .proc_handler = &proc_dointvec,
87367+ },
87368+#endif
87369+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
87370+ {
87371+ .procname = "chroot_findtask",
87372+ .data = &grsec_enable_chroot_findtask,
87373+ .maxlen = sizeof(int),
87374+ .mode = 0600,
87375+ .proc_handler = &proc_dointvec,
87376+ },
87377+#endif
87378+#ifdef CONFIG_GRKERNSEC_RESLOG
87379+ {
87380+ .procname = "resource_logging",
87381+ .data = &grsec_resource_logging,
87382+ .maxlen = sizeof(int),
87383+ .mode = 0600,
87384+ .proc_handler = &proc_dointvec,
87385+ },
87386+#endif
87387+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
87388+ {
87389+ .procname = "audit_ptrace",
87390+ .data = &grsec_enable_audit_ptrace,
87391+ .maxlen = sizeof(int),
87392+ .mode = 0600,
87393+ .proc_handler = &proc_dointvec,
87394+ },
87395+#endif
87396+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
87397+ {
87398+ .procname = "harden_ptrace",
87399+ .data = &grsec_enable_harden_ptrace,
87400+ .maxlen = sizeof(int),
87401+ .mode = 0600,
87402+ .proc_handler = &proc_dointvec,
87403+ },
87404+#endif
87405+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
87406+ {
87407+ .procname = "harden_ipc",
87408+ .data = &grsec_enable_harden_ipc,
87409+ .maxlen = sizeof(int),
87410+ .mode = 0600,
87411+ .proc_handler = &proc_dointvec,
87412+ },
87413+#endif
87414+ {
87415+ .procname = "grsec_lock",
87416+ .data = &grsec_lock,
87417+ .maxlen = sizeof(int),
87418+ .mode = 0600,
87419+ .proc_handler = &proc_dointvec,
87420+ },
87421+#endif
87422+#ifdef CONFIG_GRKERNSEC_ROFS
87423+ {
87424+ .procname = "romount_protect",
87425+ .data = &grsec_enable_rofs,
87426+ .maxlen = sizeof(int),
87427+ .mode = 0600,
87428+ .proc_handler = &proc_dointvec_minmax,
87429+ .extra1 = &one,
87430+ .extra2 = &one,
87431+ },
87432+#endif
87433+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
87434+ {
87435+ .procname = "deny_new_usb",
87436+ .data = &grsec_deny_new_usb,
87437+ .maxlen = sizeof(int),
87438+ .mode = 0600,
87439+ .proc_handler = &proc_dointvec,
87440+ },
87441+#endif
87442+ { }
87443+};
87444+#endif
87445diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
87446new file mode 100644
87447index 0000000..61b514e
87448--- /dev/null
87449+++ b/grsecurity/grsec_time.c
87450@@ -0,0 +1,16 @@
87451+#include <linux/kernel.h>
87452+#include <linux/sched.h>
87453+#include <linux/grinternal.h>
87454+#include <linux/module.h>
87455+
87456+void
87457+gr_log_timechange(void)
87458+{
87459+#ifdef CONFIG_GRKERNSEC_TIME
87460+ if (grsec_enable_time)
87461+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
87462+#endif
87463+ return;
87464+}
87465+
87466+EXPORT_SYMBOL_GPL(gr_log_timechange);
87467diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
87468new file mode 100644
87469index 0000000..d1953de
87470--- /dev/null
87471+++ b/grsecurity/grsec_tpe.c
87472@@ -0,0 +1,78 @@
87473+#include <linux/kernel.h>
87474+#include <linux/sched.h>
87475+#include <linux/file.h>
87476+#include <linux/fs.h>
87477+#include <linux/grinternal.h>
87478+
87479+extern int gr_acl_tpe_check(void);
87480+
87481+int
87482+gr_tpe_allow(const struct file *file)
87483+{
87484+#ifdef CONFIG_GRKERNSEC
87485+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
87486+ struct inode *file_inode = file->f_path.dentry->d_inode;
87487+ const struct cred *cred = current_cred();
87488+ char *msg = NULL;
87489+ char *msg2 = NULL;
87490+
87491+ // never restrict root
87492+ if (gr_is_global_root(cred->uid))
87493+ return 1;
87494+
87495+ if (grsec_enable_tpe) {
87496+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
87497+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
87498+ msg = "not being in trusted group";
87499+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
87500+ msg = "being in untrusted group";
87501+#else
87502+ if (in_group_p(grsec_tpe_gid))
87503+ msg = "being in untrusted group";
87504+#endif
87505+ }
87506+ if (!msg && gr_acl_tpe_check())
87507+ msg = "being in untrusted role";
87508+
87509+ // not in any affected group/role
87510+ if (!msg)
87511+ goto next_check;
87512+
87513+ if (gr_is_global_nonroot(inode->i_uid))
87514+ msg2 = "file in non-root-owned directory";
87515+ else if (inode->i_mode & S_IWOTH)
87516+ msg2 = "file in world-writable directory";
87517+ else if (inode->i_mode & S_IWGRP)
87518+ msg2 = "file in group-writable directory";
87519+ else if (file_inode->i_mode & S_IWOTH)
87520+ msg2 = "file is world-writable";
87521+
87522+ if (msg && msg2) {
87523+ char fullmsg[70] = {0};
87524+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
87525+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
87526+ return 0;
87527+ }
87528+ msg = NULL;
87529+next_check:
87530+#ifdef CONFIG_GRKERNSEC_TPE_ALL
87531+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
87532+ return 1;
87533+
87534+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
87535+ msg = "directory not owned by user";
87536+ else if (inode->i_mode & S_IWOTH)
87537+ msg = "file in world-writable directory";
87538+ else if (inode->i_mode & S_IWGRP)
87539+ msg = "file in group-writable directory";
87540+ else if (file_inode->i_mode & S_IWOTH)
87541+ msg = "file is world-writable";
87542+
87543+ if (msg) {
87544+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
87545+ return 0;
87546+ }
87547+#endif
87548+#endif
87549+ return 1;
87550+}
87551diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
87552new file mode 100644
87553index 0000000..ae02d8e
87554--- /dev/null
87555+++ b/grsecurity/grsec_usb.c
87556@@ -0,0 +1,15 @@
87557+#include <linux/kernel.h>
87558+#include <linux/grinternal.h>
87559+#include <linux/module.h>
87560+
87561+int gr_handle_new_usb(void)
87562+{
87563+#ifdef CONFIG_GRKERNSEC_DENYUSB
87564+ if (grsec_deny_new_usb) {
87565+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
87566+ return 1;
87567+ }
87568+#endif
87569+ return 0;
87570+}
87571+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
87572diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
87573new file mode 100644
87574index 0000000..158b330
87575--- /dev/null
87576+++ b/grsecurity/grsum.c
87577@@ -0,0 +1,64 @@
87578+#include <linux/err.h>
87579+#include <linux/kernel.h>
87580+#include <linux/sched.h>
87581+#include <linux/mm.h>
87582+#include <linux/scatterlist.h>
87583+#include <linux/crypto.h>
87584+#include <linux/gracl.h>
87585+
87586+
87587+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
87588+#error "crypto and sha256 must be built into the kernel"
87589+#endif
87590+
87591+int
87592+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
87593+{
87594+ struct crypto_hash *tfm;
87595+ struct hash_desc desc;
87596+ struct scatterlist sg[2];
87597+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
87598+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
87599+ unsigned long *sumptr = (unsigned long *)sum;
87600+ int cryptres;
87601+ int retval = 1;
87602+ volatile int mismatched = 0;
87603+ volatile int dummy = 0;
87604+ unsigned int i;
87605+
87606+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
87607+ if (IS_ERR(tfm)) {
87608+ /* should never happen, since sha256 should be built in */
87609+ memset(entry->pw, 0, GR_PW_LEN);
87610+ return 1;
87611+ }
87612+
87613+ sg_init_table(sg, 2);
87614+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
87615+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
87616+
87617+ desc.tfm = tfm;
87618+ desc.flags = 0;
87619+
87620+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
87621+ temp_sum);
87622+
87623+ memset(entry->pw, 0, GR_PW_LEN);
87624+
87625+ if (cryptres)
87626+ goto out;
87627+
87628+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
87629+ if (sumptr[i] != tmpsumptr[i])
87630+ mismatched = 1;
87631+ else
87632+ dummy = 1; // waste a cycle
87633+
87634+ if (!mismatched)
87635+ retval = dummy - 1;
87636+
87637+out:
87638+ crypto_free_hash(tfm);
87639+
87640+ return retval;
87641+}
87642diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
87643index 5bdab6b..9ae82fe 100644
87644--- a/include/asm-generic/4level-fixup.h
87645+++ b/include/asm-generic/4level-fixup.h
87646@@ -14,8 +14,10 @@
87647 #define pmd_alloc(mm, pud, address) \
87648 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
87649 NULL: pmd_offset(pud, address))
87650+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
87651
87652 #define pud_alloc(mm, pgd, address) (pgd)
87653+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
87654 #define pud_offset(pgd, start) (pgd)
87655 #define pud_none(pud) 0
87656 #define pud_bad(pud) 0
87657diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
87658index b7babf0..1e4b4f1 100644
87659--- a/include/asm-generic/atomic-long.h
87660+++ b/include/asm-generic/atomic-long.h
87661@@ -22,6 +22,12 @@
87662
87663 typedef atomic64_t atomic_long_t;
87664
87665+#ifdef CONFIG_PAX_REFCOUNT
87666+typedef atomic64_unchecked_t atomic_long_unchecked_t;
87667+#else
87668+typedef atomic64_t atomic_long_unchecked_t;
87669+#endif
87670+
87671 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
87672
87673 static inline long atomic_long_read(atomic_long_t *l)
87674@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
87675 return (long)atomic64_read(v);
87676 }
87677
87678+#ifdef CONFIG_PAX_REFCOUNT
87679+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
87680+{
87681+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87682+
87683+ return (long)atomic64_read_unchecked(v);
87684+}
87685+#endif
87686+
87687 static inline void atomic_long_set(atomic_long_t *l, long i)
87688 {
87689 atomic64_t *v = (atomic64_t *)l;
87690@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
87691 atomic64_set(v, i);
87692 }
87693
87694+#ifdef CONFIG_PAX_REFCOUNT
87695+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
87696+{
87697+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87698+
87699+ atomic64_set_unchecked(v, i);
87700+}
87701+#endif
87702+
87703 static inline void atomic_long_inc(atomic_long_t *l)
87704 {
87705 atomic64_t *v = (atomic64_t *)l;
87706@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
87707 atomic64_inc(v);
87708 }
87709
87710+#ifdef CONFIG_PAX_REFCOUNT
87711+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
87712+{
87713+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87714+
87715+ atomic64_inc_unchecked(v);
87716+}
87717+#endif
87718+
87719 static inline void atomic_long_dec(atomic_long_t *l)
87720 {
87721 atomic64_t *v = (atomic64_t *)l;
87722@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
87723 atomic64_dec(v);
87724 }
87725
87726+#ifdef CONFIG_PAX_REFCOUNT
87727+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
87728+{
87729+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87730+
87731+ atomic64_dec_unchecked(v);
87732+}
87733+#endif
87734+
87735 static inline void atomic_long_add(long i, atomic_long_t *l)
87736 {
87737 atomic64_t *v = (atomic64_t *)l;
87738@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
87739 atomic64_add(i, v);
87740 }
87741
87742+#ifdef CONFIG_PAX_REFCOUNT
87743+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
87744+{
87745+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87746+
87747+ atomic64_add_unchecked(i, v);
87748+}
87749+#endif
87750+
87751 static inline void atomic_long_sub(long i, atomic_long_t *l)
87752 {
87753 atomic64_t *v = (atomic64_t *)l;
87754@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
87755 atomic64_sub(i, v);
87756 }
87757
87758+#ifdef CONFIG_PAX_REFCOUNT
87759+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
87760+{
87761+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87762+
87763+ atomic64_sub_unchecked(i, v);
87764+}
87765+#endif
87766+
87767 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
87768 {
87769 atomic64_t *v = (atomic64_t *)l;
87770@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
87771 return atomic64_add_negative(i, v);
87772 }
87773
87774-static inline long atomic_long_add_return(long i, atomic_long_t *l)
87775+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
87776 {
87777 atomic64_t *v = (atomic64_t *)l;
87778
87779 return (long)atomic64_add_return(i, v);
87780 }
87781
87782+#ifdef CONFIG_PAX_REFCOUNT
87783+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
87784+{
87785+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87786+
87787+ return (long)atomic64_add_return_unchecked(i, v);
87788+}
87789+#endif
87790+
87791 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
87792 {
87793 atomic64_t *v = (atomic64_t *)l;
87794@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
87795 return (long)atomic64_inc_return(v);
87796 }
87797
87798+#ifdef CONFIG_PAX_REFCOUNT
87799+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
87800+{
87801+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
87802+
87803+ return (long)atomic64_inc_return_unchecked(v);
87804+}
87805+#endif
87806+
87807 static inline long atomic_long_dec_return(atomic_long_t *l)
87808 {
87809 atomic64_t *v = (atomic64_t *)l;
87810@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
87811
87812 typedef atomic_t atomic_long_t;
87813
87814+#ifdef CONFIG_PAX_REFCOUNT
87815+typedef atomic_unchecked_t atomic_long_unchecked_t;
87816+#else
87817+typedef atomic_t atomic_long_unchecked_t;
87818+#endif
87819+
87820 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
87821 static inline long atomic_long_read(atomic_long_t *l)
87822 {
87823@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
87824 return (long)atomic_read(v);
87825 }
87826
87827+#ifdef CONFIG_PAX_REFCOUNT
87828+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
87829+{
87830+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87831+
87832+ return (long)atomic_read_unchecked(v);
87833+}
87834+#endif
87835+
87836 static inline void atomic_long_set(atomic_long_t *l, long i)
87837 {
87838 atomic_t *v = (atomic_t *)l;
87839@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
87840 atomic_set(v, i);
87841 }
87842
87843+#ifdef CONFIG_PAX_REFCOUNT
87844+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
87845+{
87846+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87847+
87848+ atomic_set_unchecked(v, i);
87849+}
87850+#endif
87851+
87852 static inline void atomic_long_inc(atomic_long_t *l)
87853 {
87854 atomic_t *v = (atomic_t *)l;
87855@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
87856 atomic_inc(v);
87857 }
87858
87859+#ifdef CONFIG_PAX_REFCOUNT
87860+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
87861+{
87862+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87863+
87864+ atomic_inc_unchecked(v);
87865+}
87866+#endif
87867+
87868 static inline void atomic_long_dec(atomic_long_t *l)
87869 {
87870 atomic_t *v = (atomic_t *)l;
87871@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
87872 atomic_dec(v);
87873 }
87874
87875+#ifdef CONFIG_PAX_REFCOUNT
87876+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
87877+{
87878+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87879+
87880+ atomic_dec_unchecked(v);
87881+}
87882+#endif
87883+
87884 static inline void atomic_long_add(long i, atomic_long_t *l)
87885 {
87886 atomic_t *v = (atomic_t *)l;
87887@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
87888 atomic_add(i, v);
87889 }
87890
87891+#ifdef CONFIG_PAX_REFCOUNT
87892+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
87893+{
87894+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87895+
87896+ atomic_add_unchecked(i, v);
87897+}
87898+#endif
87899+
87900 static inline void atomic_long_sub(long i, atomic_long_t *l)
87901 {
87902 atomic_t *v = (atomic_t *)l;
87903@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
87904 atomic_sub(i, v);
87905 }
87906
87907+#ifdef CONFIG_PAX_REFCOUNT
87908+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
87909+{
87910+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87911+
87912+ atomic_sub_unchecked(i, v);
87913+}
87914+#endif
87915+
87916 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
87917 {
87918 atomic_t *v = (atomic_t *)l;
87919@@ -211,13 +349,23 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
87920 return atomic_add_negative(i, v);
87921 }
87922
87923-static inline long atomic_long_add_return(long i, atomic_long_t *l)
87924+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
87925 {
87926 atomic_t *v = (atomic_t *)l;
87927
87928 return (long)atomic_add_return(i, v);
87929 }
87930
87931+#ifdef CONFIG_PAX_REFCOUNT
87932+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
87933+{
87934+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87935+
87936+ return (long)atomic_add_return_unchecked(i, v);
87937+}
87938+
87939+#endif
87940+
87941 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
87942 {
87943 atomic_t *v = (atomic_t *)l;
87944@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
87945 return (long)atomic_inc_return(v);
87946 }
87947
87948+#ifdef CONFIG_PAX_REFCOUNT
87949+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
87950+{
87951+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
87952+
87953+ return (long)atomic_inc_return_unchecked(v);
87954+}
87955+#endif
87956+
87957 static inline long atomic_long_dec_return(atomic_long_t *l)
87958 {
87959 atomic_t *v = (atomic_t *)l;
87960@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
87961
87962 #endif /* BITS_PER_LONG == 64 */
87963
87964+#ifdef CONFIG_PAX_REFCOUNT
87965+static inline void pax_refcount_needs_these_functions(void)
87966+{
87967+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
87968+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
87969+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
87970+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
87971+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
87972+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
87973+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
87974+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
87975+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
87976+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
87977+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
87978+#ifdef CONFIG_X86
87979+ atomic_clear_mask_unchecked(0, NULL);
87980+ atomic_set_mask_unchecked(0, NULL);
87981+#endif
87982+
87983+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
87984+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
87985+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
87986+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
87987+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
87988+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
87989+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
87990+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
87991+}
87992+#else
87993+#define atomic_read_unchecked(v) atomic_read(v)
87994+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
87995+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
87996+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
87997+#define atomic_inc_unchecked(v) atomic_inc(v)
87998+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
87999+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
88000+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
88001+#define atomic_dec_unchecked(v) atomic_dec(v)
88002+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
88003+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
88004+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
88005+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
88006+
88007+#define atomic_long_read_unchecked(v) atomic_long_read(v)
88008+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
88009+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
88010+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
88011+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
88012+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
88013+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
88014+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
88015+#endif
88016+
88017 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
88018diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
88019index 30ad9c8..c70c170 100644
88020--- a/include/asm-generic/atomic64.h
88021+++ b/include/asm-generic/atomic64.h
88022@@ -16,6 +16,8 @@ typedef struct {
88023 long long counter;
88024 } atomic64_t;
88025
88026+typedef atomic64_t atomic64_unchecked_t;
88027+
88028 #define ATOMIC64_INIT(i) { (i) }
88029
88030 extern long long atomic64_read(const atomic64_t *v);
88031@@ -51,4 +53,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
88032 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
88033 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
88034
88035+#define atomic64_read_unchecked(v) atomic64_read(v)
88036+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
88037+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
88038+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
88039+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
88040+#define atomic64_inc_unchecked(v) atomic64_inc(v)
88041+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
88042+#define atomic64_dec_unchecked(v) atomic64_dec(v)
88043+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
88044+
88045 #endif /* _ASM_GENERIC_ATOMIC64_H */
88046diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
88047index f5c40b0..e902f9d 100644
88048--- a/include/asm-generic/barrier.h
88049+++ b/include/asm-generic/barrier.h
88050@@ -82,7 +82,7 @@
88051 do { \
88052 compiletime_assert_atomic_type(*p); \
88053 smp_mb(); \
88054- ACCESS_ONCE(*p) = (v); \
88055+ ACCESS_ONCE_RW(*p) = (v); \
88056 } while (0)
88057
88058 #define smp_load_acquire(p) \
88059diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
88060index a60a7cc..0fe12f2 100644
88061--- a/include/asm-generic/bitops/__fls.h
88062+++ b/include/asm-generic/bitops/__fls.h
88063@@ -9,7 +9,7 @@
88064 *
88065 * Undefined if no set bit exists, so code should check against 0 first.
88066 */
88067-static __always_inline unsigned long __fls(unsigned long word)
88068+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
88069 {
88070 int num = BITS_PER_LONG - 1;
88071
88072diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
88073index 0576d1f..dad6c71 100644
88074--- a/include/asm-generic/bitops/fls.h
88075+++ b/include/asm-generic/bitops/fls.h
88076@@ -9,7 +9,7 @@
88077 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
88078 */
88079
88080-static __always_inline int fls(int x)
88081+static __always_inline int __intentional_overflow(-1) fls(int x)
88082 {
88083 int r = 32;
88084
88085diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
88086index b097cf8..3d40e14 100644
88087--- a/include/asm-generic/bitops/fls64.h
88088+++ b/include/asm-generic/bitops/fls64.h
88089@@ -15,7 +15,7 @@
88090 * at position 64.
88091 */
88092 #if BITS_PER_LONG == 32
88093-static __always_inline int fls64(__u64 x)
88094+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
88095 {
88096 __u32 h = x >> 32;
88097 if (h)
88098@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
88099 return fls(x);
88100 }
88101 #elif BITS_PER_LONG == 64
88102-static __always_inline int fls64(__u64 x)
88103+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
88104 {
88105 if (x == 0)
88106 return 0;
88107diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
88108index 630dd23..8c1dcb6b 100644
88109--- a/include/asm-generic/bug.h
88110+++ b/include/asm-generic/bug.h
88111@@ -62,13 +62,13 @@ struct bug_entry {
88112 * to provide better diagnostics.
88113 */
88114 #ifndef __WARN_TAINT
88115-extern __printf(3, 4)
88116+extern __printf(3, 4) __nocapture(1, 3, 4)
88117 void warn_slowpath_fmt(const char *file, const int line,
88118 const char *fmt, ...);
88119-extern __printf(4, 5)
88120+extern __printf(4, 5) __nocapture(1, 4, 5)
88121 void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint,
88122 const char *fmt, ...);
88123-extern void warn_slowpath_null(const char *file, const int line);
88124+extern __nocapture(1) void warn_slowpath_null(const char *file, const int line);
88125 #define WANT_WARN_ON_SLOWPATH
88126 #define __WARN() warn_slowpath_null(__FILE__, __LINE__)
88127 #define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg)
88128diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
88129index 1bfcfe5..e04c5c9 100644
88130--- a/include/asm-generic/cache.h
88131+++ b/include/asm-generic/cache.h
88132@@ -6,7 +6,7 @@
88133 * cache lines need to provide their own cache.h.
88134 */
88135
88136-#define L1_CACHE_SHIFT 5
88137-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
88138+#define L1_CACHE_SHIFT 5UL
88139+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
88140
88141 #endif /* __ASM_GENERIC_CACHE_H */
88142diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
88143index 0d68a1e..b74a761 100644
88144--- a/include/asm-generic/emergency-restart.h
88145+++ b/include/asm-generic/emergency-restart.h
88146@@ -1,7 +1,7 @@
88147 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
88148 #define _ASM_GENERIC_EMERGENCY_RESTART_H
88149
88150-static inline void machine_emergency_restart(void)
88151+static inline __noreturn void machine_emergency_restart(void)
88152 {
88153 machine_restart(NULL);
88154 }
88155diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
88156index 90f99c7..00ce236 100644
88157--- a/include/asm-generic/kmap_types.h
88158+++ b/include/asm-generic/kmap_types.h
88159@@ -2,9 +2,9 @@
88160 #define _ASM_GENERIC_KMAP_TYPES_H
88161
88162 #ifdef __WITH_KM_FENCE
88163-# define KM_TYPE_NR 41
88164+# define KM_TYPE_NR 42
88165 #else
88166-# define KM_TYPE_NR 20
88167+# define KM_TYPE_NR 21
88168 #endif
88169
88170 #endif
88171diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
88172index 9ceb03b..62b0b8f 100644
88173--- a/include/asm-generic/local.h
88174+++ b/include/asm-generic/local.h
88175@@ -23,24 +23,37 @@ typedef struct
88176 atomic_long_t a;
88177 } local_t;
88178
88179+typedef struct {
88180+ atomic_long_unchecked_t a;
88181+} local_unchecked_t;
88182+
88183 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
88184
88185 #define local_read(l) atomic_long_read(&(l)->a)
88186+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
88187 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
88188+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
88189 #define local_inc(l) atomic_long_inc(&(l)->a)
88190+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
88191 #define local_dec(l) atomic_long_dec(&(l)->a)
88192+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
88193 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
88194+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
88195 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
88196+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
88197
88198 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
88199 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
88200 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
88201 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
88202 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
88203+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
88204 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
88205 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
88206+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
88207
88208 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
88209+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
88210 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
88211 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
88212 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
88213diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
88214index 725612b..9cc513a 100644
88215--- a/include/asm-generic/pgtable-nopmd.h
88216+++ b/include/asm-generic/pgtable-nopmd.h
88217@@ -1,14 +1,19 @@
88218 #ifndef _PGTABLE_NOPMD_H
88219 #define _PGTABLE_NOPMD_H
88220
88221-#ifndef __ASSEMBLY__
88222-
88223 #include <asm-generic/pgtable-nopud.h>
88224
88225-struct mm_struct;
88226-
88227 #define __PAGETABLE_PMD_FOLDED
88228
88229+#define PMD_SHIFT PUD_SHIFT
88230+#define PTRS_PER_PMD 1
88231+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
88232+#define PMD_MASK (~(PMD_SIZE-1))
88233+
88234+#ifndef __ASSEMBLY__
88235+
88236+struct mm_struct;
88237+
88238 /*
88239 * Having the pmd type consist of a pud gets the size right, and allows
88240 * us to conceptually access the pud entry that this pmd is folded into
88241@@ -16,11 +21,6 @@ struct mm_struct;
88242 */
88243 typedef struct { pud_t pud; } pmd_t;
88244
88245-#define PMD_SHIFT PUD_SHIFT
88246-#define PTRS_PER_PMD 1
88247-#define PMD_SIZE (1UL << PMD_SHIFT)
88248-#define PMD_MASK (~(PMD_SIZE-1))
88249-
88250 /*
88251 * The "pud_xxx()" functions here are trivial for a folded two-level
88252 * setup: the pmd is never bad, and a pmd always exists (as it's folded
88253diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
88254index 810431d..0ec4804f 100644
88255--- a/include/asm-generic/pgtable-nopud.h
88256+++ b/include/asm-generic/pgtable-nopud.h
88257@@ -1,10 +1,15 @@
88258 #ifndef _PGTABLE_NOPUD_H
88259 #define _PGTABLE_NOPUD_H
88260
88261-#ifndef __ASSEMBLY__
88262-
88263 #define __PAGETABLE_PUD_FOLDED
88264
88265+#define PUD_SHIFT PGDIR_SHIFT
88266+#define PTRS_PER_PUD 1
88267+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
88268+#define PUD_MASK (~(PUD_SIZE-1))
88269+
88270+#ifndef __ASSEMBLY__
88271+
88272 /*
88273 * Having the pud type consist of a pgd gets the size right, and allows
88274 * us to conceptually access the pgd entry that this pud is folded into
88275@@ -12,11 +17,6 @@
88276 */
88277 typedef struct { pgd_t pgd; } pud_t;
88278
88279-#define PUD_SHIFT PGDIR_SHIFT
88280-#define PTRS_PER_PUD 1
88281-#define PUD_SIZE (1UL << PUD_SHIFT)
88282-#define PUD_MASK (~(PUD_SIZE-1))
88283-
88284 /*
88285 * The "pgd_xxx()" functions here are trivial for a folded two-level
88286 * setup: the pud is never bad, and a pud always exists (as it's folded
88287@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
88288 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
88289
88290 #define pgd_populate(mm, pgd, pud) do { } while (0)
88291+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
88292 /*
88293 * (puds are folded into pgds so this doesn't get actually called,
88294 * but the define is needed for a generic inline function.)
88295diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
88296index 4d46085..f4e92ef 100644
88297--- a/include/asm-generic/pgtable.h
88298+++ b/include/asm-generic/pgtable.h
88299@@ -689,6 +689,22 @@ static inline int pmd_protnone(pmd_t pmd)
88300 }
88301 #endif /* CONFIG_NUMA_BALANCING */
88302
88303+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
88304+#ifdef CONFIG_PAX_KERNEXEC
88305+#error KERNEXEC requires pax_open_kernel
88306+#else
88307+static inline unsigned long pax_open_kernel(void) { return 0; }
88308+#endif
88309+#endif
88310+
88311+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
88312+#ifdef CONFIG_PAX_KERNEXEC
88313+#error KERNEXEC requires pax_close_kernel
88314+#else
88315+static inline unsigned long pax_close_kernel(void) { return 0; }
88316+#endif
88317+#endif
88318+
88319 #endif /* CONFIG_MMU */
88320
88321 #endif /* !__ASSEMBLY__ */
88322diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
88323index 72d8803..cb9749c 100644
88324--- a/include/asm-generic/uaccess.h
88325+++ b/include/asm-generic/uaccess.h
88326@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
88327 return __clear_user(to, n);
88328 }
88329
88330+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
88331+#ifdef CONFIG_PAX_MEMORY_UDEREF
88332+#error UDEREF requires pax_open_userland
88333+#else
88334+static inline unsigned long pax_open_userland(void) { return 0; }
88335+#endif
88336+#endif
88337+
88338+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
88339+#ifdef CONFIG_PAX_MEMORY_UDEREF
88340+#error UDEREF requires pax_close_userland
88341+#else
88342+static inline unsigned long pax_close_userland(void) { return 0; }
88343+#endif
88344+#endif
88345+
88346 #endif /* __ASM_GENERIC_UACCESS_H */
88347diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
88348index ac78910..8b5f068 100644
88349--- a/include/asm-generic/vmlinux.lds.h
88350+++ b/include/asm-generic/vmlinux.lds.h
88351@@ -234,6 +234,7 @@
88352 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
88353 VMLINUX_SYMBOL(__start_rodata) = .; \
88354 *(.rodata) *(.rodata.*) \
88355+ *(.data..read_only) \
88356 *(__vermagic) /* Kernel version magic */ \
88357 . = ALIGN(8); \
88358 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
88359@@ -492,6 +493,7 @@
88360 KERNEL_CTORS() \
88361 MCOUNT_REC() \
88362 *(.init.rodata) \
88363+ *(.init.rodata.*) \
88364 FTRACE_EVENTS() \
88365 TRACE_SYSCALLS() \
88366 KPROBE_BLACKLIST() \
88367@@ -511,6 +513,8 @@
88368
88369 #define EXIT_DATA \
88370 *(.exit.data) \
88371+ *(.exit.rodata) \
88372+ *(.exit.rodata.*) \
88373 MEM_DISCARD(exit.data) \
88374 MEM_DISCARD(exit.rodata)
88375
88376@@ -727,17 +731,18 @@
88377 * section in the linker script will go there too. @phdr should have
88378 * a leading colon.
88379 *
88380- * Note that this macros defines __per_cpu_load as an absolute symbol.
88381+ * Note that this macros defines per_cpu_load as an absolute symbol.
88382 * If there is no need to put the percpu section at a predetermined
88383 * address, use PERCPU_SECTION.
88384 */
88385 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
88386- VMLINUX_SYMBOL(__per_cpu_load) = .; \
88387- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
88388+ per_cpu_load = .; \
88389+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
88390 - LOAD_OFFSET) { \
88391+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
88392 PERCPU_INPUT(cacheline) \
88393 } phdr \
88394- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
88395+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
88396
88397 /**
88398 * PERCPU_SECTION - define output section for percpu area, simple version
88399diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
88400index 623a59c..1e79ab9 100644
88401--- a/include/crypto/algapi.h
88402+++ b/include/crypto/algapi.h
88403@@ -34,7 +34,7 @@ struct crypto_type {
88404 unsigned int maskclear;
88405 unsigned int maskset;
88406 unsigned int tfmsize;
88407-};
88408+} __do_const;
88409
88410 struct crypto_instance {
88411 struct crypto_alg alg;
88412diff --git a/include/drm/drmP.h b/include/drm/drmP.h
88413index e928625..ff97886 100644
88414--- a/include/drm/drmP.h
88415+++ b/include/drm/drmP.h
88416@@ -59,6 +59,7 @@
88417
88418 #include <asm/mman.h>
88419 #include <asm/pgalloc.h>
88420+#include <asm/local.h>
88421 #include <asm/uaccess.h>
88422
88423 #include <uapi/drm/drm.h>
88424@@ -133,17 +134,18 @@ void drm_err(const char *format, ...);
88425 /*@{*/
88426
88427 /* driver capabilities and requirements mask */
88428-#define DRIVER_USE_AGP 0x1
88429-#define DRIVER_PCI_DMA 0x8
88430-#define DRIVER_SG 0x10
88431-#define DRIVER_HAVE_DMA 0x20
88432-#define DRIVER_HAVE_IRQ 0x40
88433-#define DRIVER_IRQ_SHARED 0x80
88434-#define DRIVER_GEM 0x1000
88435-#define DRIVER_MODESET 0x2000
88436-#define DRIVER_PRIME 0x4000
88437-#define DRIVER_RENDER 0x8000
88438-#define DRIVER_ATOMIC 0x10000
88439+#define DRIVER_USE_AGP 0x1
88440+#define DRIVER_PCI_DMA 0x8
88441+#define DRIVER_SG 0x10
88442+#define DRIVER_HAVE_DMA 0x20
88443+#define DRIVER_HAVE_IRQ 0x40
88444+#define DRIVER_IRQ_SHARED 0x80
88445+#define DRIVER_GEM 0x1000
88446+#define DRIVER_MODESET 0x2000
88447+#define DRIVER_PRIME 0x4000
88448+#define DRIVER_RENDER 0x8000
88449+#define DRIVER_ATOMIC 0x10000
88450+#define DRIVER_KMS_LEGACY_CONTEXT 0x20000
88451
88452 /***********************************************************************/
88453 /** \name Macros to make printk easier */
88454@@ -224,10 +226,12 @@ void drm_err(const char *format, ...);
88455 * \param cmd command.
88456 * \param arg argument.
88457 */
88458-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
88459+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
88460+ struct drm_file *file_priv);
88461+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
88462 struct drm_file *file_priv);
88463
88464-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
88465+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
88466 unsigned long arg);
88467
88468 #define DRM_IOCTL_NR(n) _IOC_NR(n)
88469@@ -243,10 +247,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
88470 struct drm_ioctl_desc {
88471 unsigned int cmd;
88472 int flags;
88473- drm_ioctl_t *func;
88474+ drm_ioctl_t func;
88475 unsigned int cmd_drv;
88476 const char *name;
88477-};
88478+} __do_const;
88479
88480 /**
88481 * Creates a driver or general drm_ioctl_desc array entry for the given
88482@@ -632,7 +636,8 @@ struct drm_info_list {
88483 int (*show)(struct seq_file*, void*); /** show callback */
88484 u32 driver_features; /**< Required driver features for this entry */
88485 void *data;
88486-};
88487+} __do_const;
88488+typedef struct drm_info_list __no_const drm_info_list_no_const;
88489
88490 /**
88491 * debugfs node structure. This structure represents a debugfs file.
88492@@ -716,7 +721,7 @@ struct drm_device {
88493
88494 /** \name Usage Counters */
88495 /*@{ */
88496- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
88497+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
88498 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
88499 int buf_use; /**< Buffers in use -- cannot alloc */
88500 atomic_t buf_alloc; /**< Buffer allocation in progress */
88501diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
88502index c250a22..59d2094 100644
88503--- a/include/drm/drm_crtc_helper.h
88504+++ b/include/drm/drm_crtc_helper.h
88505@@ -160,7 +160,7 @@ struct drm_encoder_helper_funcs {
88506 int (*atomic_check)(struct drm_encoder *encoder,
88507 struct drm_crtc_state *crtc_state,
88508 struct drm_connector_state *conn_state);
88509-};
88510+} __no_const;
88511
88512 /**
88513 * struct drm_connector_helper_funcs - helper operations for connectors
88514diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
88515index d016dc5..3951fe0 100644
88516--- a/include/drm/i915_pciids.h
88517+++ b/include/drm/i915_pciids.h
88518@@ -37,7 +37,7 @@
88519 */
88520 #define INTEL_VGA_DEVICE(id, info) { \
88521 0x8086, id, \
88522- ~0, ~0, \
88523+ PCI_ANY_ID, PCI_ANY_ID, \
88524 0x030000, 0xff0000, \
88525 (unsigned long) info }
88526
88527diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
88528index 72dcbe8..8db58d7 100644
88529--- a/include/drm/ttm/ttm_memory.h
88530+++ b/include/drm/ttm/ttm_memory.h
88531@@ -48,7 +48,7 @@
88532
88533 struct ttm_mem_shrink {
88534 int (*do_shrink) (struct ttm_mem_shrink *);
88535-};
88536+} __no_const;
88537
88538 /**
88539 * struct ttm_mem_global - Global memory accounting structure.
88540diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
88541index 49a8284..9643967 100644
88542--- a/include/drm/ttm/ttm_page_alloc.h
88543+++ b/include/drm/ttm/ttm_page_alloc.h
88544@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
88545 */
88546 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
88547
88548+struct device;
88549 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
88550 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
88551
88552diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
88553index 4b840e8..155d235 100644
88554--- a/include/keys/asymmetric-subtype.h
88555+++ b/include/keys/asymmetric-subtype.h
88556@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
88557 /* Verify the signature on a key of this subtype (optional) */
88558 int (*verify_signature)(const struct key *key,
88559 const struct public_key_signature *sig);
88560-};
88561+} __do_const;
88562
88563 /**
88564 * asymmetric_key_subtype - Get the subtype from an asymmetric key
88565diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
88566index c1da539..1dcec55 100644
88567--- a/include/linux/atmdev.h
88568+++ b/include/linux/atmdev.h
88569@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
88570 #endif
88571
88572 struct k_atm_aal_stats {
88573-#define __HANDLE_ITEM(i) atomic_t i
88574+#define __HANDLE_ITEM(i) atomic_unchecked_t i
88575 __AAL_STAT_ITEMS
88576 #undef __HANDLE_ITEM
88577 };
88578@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
88579 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
88580 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
88581 struct module *owner;
88582-};
88583+} __do_const ;
88584
88585 struct atmphy_ops {
88586 int (*start)(struct atm_dev *dev);
88587diff --git a/include/linux/atomic.h b/include/linux/atomic.h
88588index 5b08a85..60922fb 100644
88589--- a/include/linux/atomic.h
88590+++ b/include/linux/atomic.h
88591@@ -12,7 +12,7 @@
88592 * Atomically adds @a to @v, so long as @v was not already @u.
88593 * Returns non-zero if @v was not @u, and zero otherwise.
88594 */
88595-static inline int atomic_add_unless(atomic_t *v, int a, int u)
88596+static inline int __intentional_overflow(-1) atomic_add_unless(atomic_t *v, int a, int u)
88597 {
88598 return __atomic_add_unless(v, a, u) != u;
88599 }
88600diff --git a/include/linux/audit.h b/include/linux/audit.h
88601index c2e7e3a..8bfc0e1 100644
88602--- a/include/linux/audit.h
88603+++ b/include/linux/audit.h
88604@@ -223,7 +223,7 @@ static inline void audit_ptrace(struct task_struct *t)
88605 extern unsigned int audit_serial(void);
88606 extern int auditsc_get_stamp(struct audit_context *ctx,
88607 struct timespec *t, unsigned int *serial);
88608-extern int audit_set_loginuid(kuid_t loginuid);
88609+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
88610
88611 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
88612 {
88613diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
88614index 576e463..28fd926 100644
88615--- a/include/linux/binfmts.h
88616+++ b/include/linux/binfmts.h
88617@@ -44,7 +44,7 @@ struct linux_binprm {
88618 unsigned interp_flags;
88619 unsigned interp_data;
88620 unsigned long loader, exec;
88621-};
88622+} __randomize_layout;
88623
88624 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
88625 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
88626@@ -77,8 +77,10 @@ struct linux_binfmt {
88627 int (*load_binary)(struct linux_binprm *);
88628 int (*load_shlib)(struct file *);
88629 int (*core_dump)(struct coredump_params *cprm);
88630+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
88631+ void (*handle_mmap)(struct file *);
88632 unsigned long min_coredump; /* minimal dump size */
88633-};
88634+} __do_const __randomize_layout;
88635
88636 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
88637
88638diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
88639index dbfbf49..10be372 100644
88640--- a/include/linux/bitmap.h
88641+++ b/include/linux/bitmap.h
88642@@ -299,7 +299,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
88643 return __bitmap_full(src, nbits);
88644 }
88645
88646-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
88647+static inline int __intentional_overflow(-1) bitmap_weight(const unsigned long *src, unsigned int nbits)
88648 {
88649 if (small_const_nbits(nbits))
88650 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
88651diff --git a/include/linux/bitops.h b/include/linux/bitops.h
88652index 5d858e0..336c1d9 100644
88653--- a/include/linux/bitops.h
88654+++ b/include/linux/bitops.h
88655@@ -105,7 +105,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
88656 * @word: value to rotate
88657 * @shift: bits to roll
88658 */
88659-static inline __u32 rol32(__u32 word, unsigned int shift)
88660+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
88661 {
88662 return (word << shift) | (word >> (32 - shift));
88663 }
88664@@ -115,7 +115,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
88665 * @word: value to rotate
88666 * @shift: bits to roll
88667 */
88668-static inline __u32 ror32(__u32 word, unsigned int shift)
88669+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
88670 {
88671 return (word >> shift) | (word << (32 - shift));
88672 }
88673@@ -171,7 +171,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
88674 return (__s32)(value << shift) >> shift;
88675 }
88676
88677-static inline unsigned fls_long(unsigned long l)
88678+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
88679 {
88680 if (sizeof(l) == 4)
88681 return fls(l);
88682diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
88683index 7f9a516..8889453 100644
88684--- a/include/linux/blkdev.h
88685+++ b/include/linux/blkdev.h
88686@@ -1616,7 +1616,7 @@ struct block_device_operations {
88687 /* this callback is with swap_lock and sometimes page table lock held */
88688 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
88689 struct module *owner;
88690-};
88691+} __do_const;
88692
88693 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
88694 unsigned long);
88695diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
88696index afc1343..9735539 100644
88697--- a/include/linux/blktrace_api.h
88698+++ b/include/linux/blktrace_api.h
88699@@ -25,7 +25,7 @@ struct blk_trace {
88700 struct dentry *dropped_file;
88701 struct dentry *msg_file;
88702 struct list_head running_list;
88703- atomic_t dropped;
88704+ atomic_unchecked_t dropped;
88705 };
88706
88707 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
88708diff --git a/include/linux/cache.h b/include/linux/cache.h
88709index 17e7e82..1d7da26 100644
88710--- a/include/linux/cache.h
88711+++ b/include/linux/cache.h
88712@@ -16,6 +16,14 @@
88713 #define __read_mostly
88714 #endif
88715
88716+#ifndef __read_only
88717+#ifdef CONFIG_PAX_KERNEXEC
88718+#error KERNEXEC requires __read_only
88719+#else
88720+#define __read_only __read_mostly
88721+#endif
88722+#endif
88723+
88724 #ifndef ____cacheline_aligned
88725 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
88726 #endif
88727diff --git a/include/linux/capability.h b/include/linux/capability.h
88728index aa93e5e..985a1b0 100644
88729--- a/include/linux/capability.h
88730+++ b/include/linux/capability.h
88731@@ -214,9 +214,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
88732 extern bool capable(int cap);
88733 extern bool ns_capable(struct user_namespace *ns, int cap);
88734 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
88735+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
88736 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
88737+extern bool capable_nolog(int cap);
88738+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
88739
88740 /* audit system wants to get cap info from files as well */
88741 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
88742
88743+extern int is_privileged_binary(const struct dentry *dentry);
88744+
88745 #endif /* !_LINUX_CAPABILITY_H */
88746diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
88747index 8609d57..86e4d79 100644
88748--- a/include/linux/cdrom.h
88749+++ b/include/linux/cdrom.h
88750@@ -87,7 +87,6 @@ struct cdrom_device_ops {
88751
88752 /* driver specifications */
88753 const int capability; /* capability flags */
88754- int n_minors; /* number of active minor devices */
88755 /* handle uniform packets for scsi type devices (scsi,atapi) */
88756 int (*generic_packet) (struct cdrom_device_info *,
88757 struct packet_command *);
88758diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
88759index 4ce9056..86caac6 100644
88760--- a/include/linux/cleancache.h
88761+++ b/include/linux/cleancache.h
88762@@ -31,7 +31,7 @@ struct cleancache_ops {
88763 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
88764 void (*invalidate_inode)(int, struct cleancache_filekey);
88765 void (*invalidate_fs)(int);
88766-};
88767+} __no_const;
88768
88769 extern struct cleancache_ops *
88770 cleancache_register_ops(struct cleancache_ops *ops);
88771diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
88772index 5591ea7..61b77ce 100644
88773--- a/include/linux/clk-provider.h
88774+++ b/include/linux/clk-provider.h
88775@@ -195,6 +195,7 @@ struct clk_ops {
88776 void (*init)(struct clk_hw *hw);
88777 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
88778 };
88779+typedef struct clk_ops __no_const clk_ops_no_const;
88780
88781 /**
88782 * struct clk_init_data - holds init data that's common to all clocks and is
88783diff --git a/include/linux/compat.h b/include/linux/compat.h
88784index ab25814..d1540d1 100644
88785--- a/include/linux/compat.h
88786+++ b/include/linux/compat.h
88787@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
88788 compat_size_t __user *len_ptr);
88789
88790 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
88791-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
88792+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
88793 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
88794 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
88795 compat_ssize_t msgsz, int msgflg);
88796@@ -325,7 +325,7 @@ asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
88797 long compat_sys_msgctl(int first, int second, void __user *uptr);
88798 long compat_sys_shmctl(int first, int second, void __user *uptr);
88799 long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
88800- unsigned nsems, const struct compat_timespec __user *timeout);
88801+ compat_long_t nsems, const struct compat_timespec __user *timeout);
88802 asmlinkage long compat_sys_keyctl(u32 option,
88803 u32 arg2, u32 arg3, u32 arg4, u32 arg5);
88804 asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
88805@@ -439,7 +439,7 @@ extern int compat_ptrace_request(struct task_struct *child,
88806 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
88807 compat_ulong_t addr, compat_ulong_t data);
88808 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
88809- compat_long_t addr, compat_long_t data);
88810+ compat_ulong_t addr, compat_ulong_t data);
88811
88812 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
88813 /*
88814diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
88815index cdf13ca..ba5e086 100644
88816--- a/include/linux/compiler-gcc.h
88817+++ b/include/linux/compiler-gcc.h
88818@@ -94,8 +94,8 @@
88819 */
88820 #define __pure __attribute__((pure))
88821 #define __aligned(x) __attribute__((aligned(x)))
88822-#define __printf(a, b) __attribute__((format(printf, a, b)))
88823-#define __scanf(a, b) __attribute__((format(scanf, a, b)))
88824+#define __printf(a, b) __attribute__((format(printf, a, b))) __nocapture(a, b)
88825+#define __scanf(a, b) __attribute__((format(scanf, a, b))) __nocapture(a, b)
88826 #define noinline __attribute__((noinline))
88827 #define __attribute_const__ __attribute__((__const__))
88828 #define __maybe_unused __attribute__((unused))
88829diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
88830index 769e198..7ee7cb5 100644
88831--- a/include/linux/compiler-gcc4.h
88832+++ b/include/linux/compiler-gcc4.h
88833@@ -39,9 +39,38 @@
88834 # define __compiletime_warning(message) __attribute__((warning(message)))
88835 # define __compiletime_error(message) __attribute__((error(message)))
88836 #endif /* __CHECKER__ */
88837+
88838+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
88839+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
88840+#define __bos0(ptr) __bos((ptr), 0)
88841+#define __bos1(ptr) __bos((ptr), 1)
88842 #endif /* GCC_VERSION >= 40300 */
88843
88844 #if GCC_VERSION >= 40500
88845+
88846+#ifdef RANDSTRUCT_PLUGIN
88847+#define __randomize_layout __attribute__((randomize_layout))
88848+#define __no_randomize_layout __attribute__((no_randomize_layout))
88849+#endif
88850+
88851+#ifdef CONSTIFY_PLUGIN
88852+#define __no_const __attribute__((no_const))
88853+#define __do_const __attribute__((do_const))
88854+#endif
88855+
88856+#ifdef SIZE_OVERFLOW_PLUGIN
88857+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
88858+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
88859+#endif
88860+
88861+#ifdef LATENT_ENTROPY_PLUGIN
88862+#define __latent_entropy __attribute__((latent_entropy))
88863+#endif
88864+
88865+#ifdef INITIFY_PLUGIN
88866+#define __nocapture(...) __attribute__((nocapture(__VA_ARGS__)))
88867+#endif
88868+
88869 /*
88870 * Mark a position in code as unreachable. This can be used to
88871 * suppress control flow warnings after asm blocks that transfer
88872diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
88873index efee493..8aa8f6b 100644
88874--- a/include/linux/compiler-gcc5.h
88875+++ b/include/linux/compiler-gcc5.h
88876@@ -28,6 +28,34 @@
88877 # define __compiletime_error(message) __attribute__((error(message)))
88878 #endif /* __CHECKER__ */
88879
88880+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
88881+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
88882+#define __bos0(ptr) __bos((ptr), 0)
88883+#define __bos1(ptr) __bos((ptr), 1)
88884+
88885+#ifdef RANDSTRUCT_PLUGIN
88886+#define __randomize_layout __attribute__((randomize_layout))
88887+#define __no_randomize_layout __attribute__((no_randomize_layout))
88888+#endif
88889+
88890+#ifdef CONSTIFY_PLUGIN
88891+#define __no_const __attribute__((no_const))
88892+#define __do_const __attribute__((do_const))
88893+#endif
88894+
88895+#ifdef SIZE_OVERFLOW_PLUGIN
88896+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
88897+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
88898+#endif
88899+
88900+#ifdef LATENT_ENTROPY_PLUGIN
88901+#define __latent_entropy __attribute__((latent_entropy))
88902+#endif
88903+
88904+#ifdef INITIFY_PLUGIN
88905+#define __nocapture(...) __attribute__((nocapture(__VA_ARGS__)))
88906+#endif
88907+
88908 /*
88909 * Mark a position in code as unreachable. This can be used to
88910 * suppress control flow warnings after asm blocks that transfer
88911diff --git a/include/linux/compiler.h b/include/linux/compiler.h
88912index 1b45e4a..eff29a7 100644
88913--- a/include/linux/compiler.h
88914+++ b/include/linux/compiler.h
88915@@ -5,11 +5,14 @@
88916
88917 #ifdef __CHECKER__
88918 # define __user __attribute__((noderef, address_space(1)))
88919+# define __force_user __force __user
88920 # define __kernel __attribute__((address_space(0)))
88921+# define __force_kernel __force __kernel
88922 # define __safe __attribute__((safe))
88923 # define __force __attribute__((force))
88924 # define __nocast __attribute__((nocast))
88925 # define __iomem __attribute__((noderef, address_space(2)))
88926+# define __force_iomem __force __iomem
88927 # define __must_hold(x) __attribute__((context(x,1,1)))
88928 # define __acquires(x) __attribute__((context(x,0,1)))
88929 # define __releases(x) __attribute__((context(x,1,0)))
88930@@ -17,20 +20,37 @@
88931 # define __release(x) __context__(x,-1)
88932 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
88933 # define __percpu __attribute__((noderef, address_space(3)))
88934+# define __force_percpu __force __percpu
88935 #ifdef CONFIG_SPARSE_RCU_POINTER
88936 # define __rcu __attribute__((noderef, address_space(4)))
88937+# define __force_rcu __force __rcu
88938 #else
88939 # define __rcu
88940+# define __force_rcu
88941 #endif
88942 extern void __chk_user_ptr(const volatile void __user *);
88943 extern void __chk_io_ptr(const volatile void __iomem *);
88944 #else
88945-# define __user
88946-# define __kernel
88947+# ifdef CHECKER_PLUGIN
88948+//# define __user
88949+//# define __force_user
88950+//# define __kernel
88951+//# define __force_kernel
88952+# else
88953+# ifdef STRUCTLEAK_PLUGIN
88954+# define __user __attribute__((user))
88955+# else
88956+# define __user
88957+# endif
88958+# define __force_user
88959+# define __kernel
88960+# define __force_kernel
88961+# endif
88962 # define __safe
88963 # define __force
88964 # define __nocast
88965 # define __iomem
88966+# define __force_iomem
88967 # define __chk_user_ptr(x) (void)0
88968 # define __chk_io_ptr(x) (void)0
88969 # define __builtin_warning(x, y...) (1)
88970@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
88971 # define __release(x) (void)0
88972 # define __cond_lock(x,c) (c)
88973 # define __percpu
88974+# define __force_percpu
88975 # define __rcu
88976+# define __force_rcu
88977 #endif
88978
88979 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
88980@@ -205,32 +227,32 @@ static __always_inline void data_access_exceeds_word_size(void)
88981 static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
88982 {
88983 switch (size) {
88984- case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
88985- case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
88986- case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
88987+ case 1: *(__u8 *)res = *(const volatile __u8 *)p; break;
88988+ case 2: *(__u16 *)res = *(const volatile __u16 *)p; break;
88989+ case 4: *(__u32 *)res = *(const volatile __u32 *)p; break;
88990 #ifdef CONFIG_64BIT
88991- case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
88992+ case 8: *(__u64 *)res = *(const volatile __u64 *)p; break;
88993 #endif
88994 default:
88995 barrier();
88996- __builtin_memcpy((void *)res, (const void *)p, size);
88997+ __builtin_memcpy(res, (const void *)p, size);
88998 data_access_exceeds_word_size();
88999 barrier();
89000 }
89001 }
89002
89003-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
89004+static __always_inline void __write_once_size(volatile void *p, const void *res, int size)
89005 {
89006 switch (size) {
89007- case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
89008- case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
89009- case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
89010+ case 1: *(volatile __u8 *)p = *(const __u8 *)res; break;
89011+ case 2: *(volatile __u16 *)p = *(const __u16 *)res; break;
89012+ case 4: *(volatile __u32 *)p = *(const __u32 *)res; break;
89013 #ifdef CONFIG_64BIT
89014- case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
89015+ case 8: *(volatile __u64 *)p = *(const __u64 *)res; break;
89016 #endif
89017 default:
89018 barrier();
89019- __builtin_memcpy((void *)p, (const void *)res, size);
89020+ __builtin_memcpy((void *)p, res, size);
89021 data_access_exceeds_word_size();
89022 barrier();
89023 }
89024@@ -364,6 +386,38 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
89025 # define __attribute_const__ /* unimplemented */
89026 #endif
89027
89028+#ifndef __randomize_layout
89029+# define __randomize_layout
89030+#endif
89031+
89032+#ifndef __no_randomize_layout
89033+# define __no_randomize_layout
89034+#endif
89035+
89036+#ifndef __no_const
89037+# define __no_const
89038+#endif
89039+
89040+#ifndef __do_const
89041+# define __do_const
89042+#endif
89043+
89044+#ifndef __size_overflow
89045+# define __size_overflow(...)
89046+#endif
89047+
89048+#ifndef __intentional_overflow
89049+# define __intentional_overflow(...)
89050+#endif
89051+
89052+#ifndef __latent_entropy
89053+# define __latent_entropy
89054+#endif
89055+
89056+#ifndef __nocapture
89057+# define __nocapture(...)
89058+#endif
89059+
89060 /*
89061 * Tell gcc if a function is cold. The compiler will assume any path
89062 * directly leading to the call is unlikely.
89063@@ -373,6 +427,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
89064 #define __cold
89065 #endif
89066
89067+#ifndef __alloc_size
89068+#define __alloc_size(...)
89069+#endif
89070+
89071+#ifndef __bos
89072+#define __bos(ptr, arg)
89073+#endif
89074+
89075+#ifndef __bos0
89076+#define __bos0(ptr)
89077+#endif
89078+
89079+#ifndef __bos1
89080+#define __bos1(ptr)
89081+#endif
89082+
89083 /* Simple shorthand for a section definition */
89084 #ifndef __section
89085 # define __section(S) __attribute__ ((__section__(#S)))
89086@@ -387,6 +457,8 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
89087 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
89088 #endif
89089
89090+#define __type_is_unsigned(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
89091+
89092 /* Is this type a native word size -- useful for atomic operations */
89093 #ifndef __native_word
89094 # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
89095@@ -466,8 +538,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
89096 */
89097 #define __ACCESS_ONCE(x) ({ \
89098 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
89099- (volatile typeof(x) *)&(x); })
89100+ (volatile const typeof(x) *)&(x); })
89101 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
89102+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
89103
89104 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
89105 #ifdef CONFIG_KPROBES
89106diff --git a/include/linux/completion.h b/include/linux/completion.h
89107index 5d5aaae..0ea9b84 100644
89108--- a/include/linux/completion.h
89109+++ b/include/linux/completion.h
89110@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
89111
89112 extern void wait_for_completion(struct completion *);
89113 extern void wait_for_completion_io(struct completion *);
89114-extern int wait_for_completion_interruptible(struct completion *x);
89115-extern int wait_for_completion_killable(struct completion *x);
89116+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
89117+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
89118 extern unsigned long wait_for_completion_timeout(struct completion *x,
89119- unsigned long timeout);
89120+ unsigned long timeout) __intentional_overflow(-1);
89121 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
89122- unsigned long timeout);
89123+ unsigned long timeout) __intentional_overflow(-1);
89124 extern long wait_for_completion_interruptible_timeout(
89125- struct completion *x, unsigned long timeout);
89126+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
89127 extern long wait_for_completion_killable_timeout(
89128- struct completion *x, unsigned long timeout);
89129+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
89130 extern bool try_wait_for_completion(struct completion *x);
89131 extern bool completion_done(struct completion *x);
89132
89133diff --git a/include/linux/configfs.h b/include/linux/configfs.h
89134index 34025df..2a6ee32 100644
89135--- a/include/linux/configfs.h
89136+++ b/include/linux/configfs.h
89137@@ -64,7 +64,7 @@ struct config_item {
89138 struct dentry *ci_dentry;
89139 };
89140
89141-extern int config_item_set_name(struct config_item *, const char *, ...);
89142+extern __printf(2, 3) int config_item_set_name(struct config_item *, const char *, ...);
89143
89144 static inline char *config_item_name(struct config_item * item)
89145 {
89146@@ -125,7 +125,7 @@ struct configfs_attribute {
89147 const char *ca_name;
89148 struct module *ca_owner;
89149 umode_t ca_mode;
89150-};
89151+} __do_const;
89152
89153 /*
89154 * Users often need to create attribute structures for their configurable
89155diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
89156index 2ee4888..0451f5e 100644
89157--- a/include/linux/cpufreq.h
89158+++ b/include/linux/cpufreq.h
89159@@ -207,6 +207,7 @@ struct global_attr {
89160 ssize_t (*store)(struct kobject *a, struct attribute *b,
89161 const char *c, size_t count);
89162 };
89163+typedef struct global_attr __no_const global_attr_no_const;
89164
89165 #define define_one_global_ro(_name) \
89166 static struct global_attr _name = \
89167@@ -278,7 +279,7 @@ struct cpufreq_driver {
89168 bool boost_supported;
89169 bool boost_enabled;
89170 int (*set_boost)(int state);
89171-};
89172+} __do_const;
89173
89174 /* flags */
89175 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
89176diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
89177index 9c5e892..feb34e0 100644
89178--- a/include/linux/cpuidle.h
89179+++ b/include/linux/cpuidle.h
89180@@ -59,7 +59,8 @@ struct cpuidle_state {
89181 void (*enter_freeze) (struct cpuidle_device *dev,
89182 struct cpuidle_driver *drv,
89183 int index);
89184-};
89185+} __do_const;
89186+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
89187
89188 /* Idle State Flags */
89189 #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
89190@@ -227,7 +228,7 @@ struct cpuidle_governor {
89191 void (*reflect) (struct cpuidle_device *dev, int index);
89192
89193 struct module *owner;
89194-};
89195+} __do_const;
89196
89197 #ifdef CONFIG_CPU_IDLE
89198 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
89199diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
89200index 086549a..a572d94 100644
89201--- a/include/linux/cpumask.h
89202+++ b/include/linux/cpumask.h
89203@@ -126,17 +126,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
89204 }
89205
89206 /* Valid inputs for n are -1 and 0. */
89207-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
89208+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
89209 {
89210 return n+1;
89211 }
89212
89213-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
89214+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
89215 {
89216 return n+1;
89217 }
89218
89219-static inline unsigned int cpumask_next_and(int n,
89220+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
89221 const struct cpumask *srcp,
89222 const struct cpumask *andp)
89223 {
89224@@ -182,7 +182,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
89225 *
89226 * Returns >= nr_cpu_ids if no further cpus set.
89227 */
89228-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
89229+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
89230 {
89231 /* -1 is a legal arg here. */
89232 if (n != -1)
89233@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
89234 *
89235 * Returns >= nr_cpu_ids if no further cpus unset.
89236 */
89237-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
89238+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
89239 {
89240 /* -1 is a legal arg here. */
89241 if (n != -1)
89242@@ -205,7 +205,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
89243 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
89244 }
89245
89246-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
89247+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
89248 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
89249 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
89250
89251@@ -472,7 +472,7 @@ static inline bool cpumask_full(const struct cpumask *srcp)
89252 * cpumask_weight - Count of bits in *srcp
89253 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
89254 */
89255-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
89256+static inline unsigned int __intentional_overflow(-1) cpumask_weight(const struct cpumask *srcp)
89257 {
89258 return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
89259 }
89260diff --git a/include/linux/cred.h b/include/linux/cred.h
89261index 2fb2ca2..d6a3340 100644
89262--- a/include/linux/cred.h
89263+++ b/include/linux/cred.h
89264@@ -35,7 +35,7 @@ struct group_info {
89265 int nblocks;
89266 kgid_t small_block[NGROUPS_SMALL];
89267 kgid_t *blocks[0];
89268-};
89269+} __randomize_layout;
89270
89271 /**
89272 * get_group_info - Get a reference to a group info structure
89273@@ -137,7 +137,7 @@ struct cred {
89274 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
89275 struct group_info *group_info; /* supplementary groups for euid/fsgid */
89276 struct rcu_head rcu; /* RCU deletion hook */
89277-};
89278+} __randomize_layout;
89279
89280 extern void __put_cred(struct cred *);
89281 extern void exit_creds(struct task_struct *);
89282@@ -195,6 +195,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
89283 static inline void validate_process_creds(void)
89284 {
89285 }
89286+static inline void validate_task_creds(struct task_struct *task)
89287+{
89288+}
89289 #endif
89290
89291 /**
89292@@ -332,6 +335,7 @@ static inline void put_cred(const struct cred *_cred)
89293
89294 #define task_uid(task) (task_cred_xxx((task), uid))
89295 #define task_euid(task) (task_cred_xxx((task), euid))
89296+#define task_securebits(task) (task_cred_xxx((task), securebits))
89297
89298 #define current_cred_xxx(xxx) \
89299 ({ \
89300diff --git a/include/linux/crypto.h b/include/linux/crypto.h
89301index fb5ef16..05d1e59 100644
89302--- a/include/linux/crypto.h
89303+++ b/include/linux/crypto.h
89304@@ -626,7 +626,7 @@ struct cipher_tfm {
89305 const u8 *key, unsigned int keylen);
89306 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
89307 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
89308-};
89309+} __no_const;
89310
89311 struct hash_tfm {
89312 int (*init)(struct hash_desc *desc);
89313@@ -647,13 +647,13 @@ struct compress_tfm {
89314 int (*cot_decompress)(struct crypto_tfm *tfm,
89315 const u8 *src, unsigned int slen,
89316 u8 *dst, unsigned int *dlen);
89317-};
89318+} __no_const;
89319
89320 struct rng_tfm {
89321 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
89322 unsigned int dlen);
89323 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
89324-};
89325+} __no_const;
89326
89327 #define crt_ablkcipher crt_u.ablkcipher
89328 #define crt_aead crt_u.aead
89329diff --git a/include/linux/ctype.h b/include/linux/ctype.h
89330index 653589e..4ef254a 100644
89331--- a/include/linux/ctype.h
89332+++ b/include/linux/ctype.h
89333@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
89334 * Fast implementation of tolower() for internal usage. Do not use in your
89335 * code.
89336 */
89337-static inline char _tolower(const char c)
89338+static inline unsigned char _tolower(const unsigned char c)
89339 {
89340 return c | 0x20;
89341 }
89342diff --git a/include/linux/dcache.h b/include/linux/dcache.h
89343index d835879..c8e5b92 100644
89344--- a/include/linux/dcache.h
89345+++ b/include/linux/dcache.h
89346@@ -123,6 +123,9 @@ struct dentry {
89347 unsigned long d_time; /* used by d_revalidate */
89348 void *d_fsdata; /* fs-specific data */
89349
89350+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
89351+ atomic_t chroot_refcnt; /* tracks use of directory in chroot */
89352+#endif
89353 struct list_head d_lru; /* LRU list */
89354 struct list_head d_child; /* child of parent list */
89355 struct list_head d_subdirs; /* our children */
89356@@ -133,7 +136,7 @@ struct dentry {
89357 struct hlist_node d_alias; /* inode alias list */
89358 struct rcu_head d_rcu;
89359 } d_u;
89360-};
89361+} __randomize_layout;
89362
89363 /*
89364 * dentry->d_lock spinlock nesting subclasses:
89365@@ -319,7 +322,7 @@ extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
89366
89367 static inline unsigned d_count(const struct dentry *dentry)
89368 {
89369- return dentry->d_lockref.count;
89370+ return __lockref_read(&dentry->d_lockref);
89371 }
89372
89373 /*
89374@@ -347,7 +350,7 @@ extern char *dentry_path(struct dentry *, char *, int);
89375 static inline struct dentry *dget_dlock(struct dentry *dentry)
89376 {
89377 if (dentry)
89378- dentry->d_lockref.count++;
89379+ __lockref_inc(&dentry->d_lockref);
89380 return dentry;
89381 }
89382
89383diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
89384index 7925bf0..d5143d2 100644
89385--- a/include/linux/decompress/mm.h
89386+++ b/include/linux/decompress/mm.h
89387@@ -77,7 +77,7 @@ static void free(void *where)
89388 * warnings when not needed (indeed large_malloc / large_free are not
89389 * needed by inflate */
89390
89391-#define malloc(a) kmalloc(a, GFP_KERNEL)
89392+#define malloc(a) kmalloc((a), GFP_KERNEL)
89393 #define free(a) kfree(a)
89394
89395 #define large_malloc(a) vmalloc(a)
89396diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
89397index ce447f0..83c66bd 100644
89398--- a/include/linux/devfreq.h
89399+++ b/include/linux/devfreq.h
89400@@ -114,7 +114,7 @@ struct devfreq_governor {
89401 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
89402 int (*event_handler)(struct devfreq *devfreq,
89403 unsigned int event, void *data);
89404-};
89405+} __do_const;
89406
89407 /**
89408 * struct devfreq - Device devfreq structure
89409diff --git a/include/linux/device.h b/include/linux/device.h
89410index 0eb8ee2..c603b6a 100644
89411--- a/include/linux/device.h
89412+++ b/include/linux/device.h
89413@@ -311,7 +311,7 @@ struct subsys_interface {
89414 struct list_head node;
89415 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
89416 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
89417-};
89418+} __do_const;
89419
89420 int subsys_interface_register(struct subsys_interface *sif);
89421 void subsys_interface_unregister(struct subsys_interface *sif);
89422@@ -507,7 +507,7 @@ struct device_type {
89423 void (*release)(struct device *dev);
89424
89425 const struct dev_pm_ops *pm;
89426-};
89427+} __do_const;
89428
89429 /* interface for exporting device attributes */
89430 struct device_attribute {
89431@@ -517,11 +517,12 @@ struct device_attribute {
89432 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
89433 const char *buf, size_t count);
89434 };
89435+typedef struct device_attribute __no_const device_attribute_no_const;
89436
89437 struct dev_ext_attribute {
89438 struct device_attribute attr;
89439 void *var;
89440-};
89441+} __do_const;
89442
89443 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
89444 char *buf);
89445diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
89446index c3007cb..43efc8c 100644
89447--- a/include/linux/dma-mapping.h
89448+++ b/include/linux/dma-mapping.h
89449@@ -60,7 +60,7 @@ struct dma_map_ops {
89450 u64 (*get_required_mask)(struct device *dev);
89451 #endif
89452 int is_phys;
89453-};
89454+} __do_const;
89455
89456 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
89457
89458diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
89459index b6997a0..108be6c 100644
89460--- a/include/linux/dmaengine.h
89461+++ b/include/linux/dmaengine.h
89462@@ -1133,9 +1133,9 @@ struct dma_pinned_list {
89463 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
89464 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
89465
89466-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
89467+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
89468 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
89469-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
89470+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
89471 struct dma_pinned_list *pinned_list, struct page *page,
89472 unsigned int offset, size_t len);
89473
89474diff --git a/include/linux/efi.h b/include/linux/efi.h
89475index cf7e431..d239dce 100644
89476--- a/include/linux/efi.h
89477+++ b/include/linux/efi.h
89478@@ -1056,6 +1056,7 @@ struct efivar_operations {
89479 efi_set_variable_nonblocking_t *set_variable_nonblocking;
89480 efi_query_variable_store_t *query_variable_store;
89481 };
89482+typedef struct efivar_operations __no_const efivar_operations_no_const;
89483
89484 struct efivars {
89485 /*
89486diff --git a/include/linux/elf.h b/include/linux/elf.h
89487index 20fa8d8..3d0dd18 100644
89488--- a/include/linux/elf.h
89489+++ b/include/linux/elf.h
89490@@ -29,6 +29,7 @@ extern Elf32_Dyn _DYNAMIC [];
89491 #define elf_note elf32_note
89492 #define elf_addr_t Elf32_Off
89493 #define Elf_Half Elf32_Half
89494+#define elf_dyn Elf32_Dyn
89495
89496 #else
89497
89498@@ -39,6 +40,7 @@ extern Elf64_Dyn _DYNAMIC [];
89499 #define elf_note elf64_note
89500 #define elf_addr_t Elf64_Off
89501 #define Elf_Half Elf64_Half
89502+#define elf_dyn Elf64_Dyn
89503
89504 #endif
89505
89506diff --git a/include/linux/err.h b/include/linux/err.h
89507index a729120..6ede2c9 100644
89508--- a/include/linux/err.h
89509+++ b/include/linux/err.h
89510@@ -20,12 +20,12 @@
89511
89512 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
89513
89514-static inline void * __must_check ERR_PTR(long error)
89515+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
89516 {
89517 return (void *) error;
89518 }
89519
89520-static inline long __must_check PTR_ERR(__force const void *ptr)
89521+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
89522 {
89523 return (long) ptr;
89524 }
89525diff --git a/include/linux/extcon.h b/include/linux/extcon.h
89526index 36f49c4..a2a1f4c 100644
89527--- a/include/linux/extcon.h
89528+++ b/include/linux/extcon.h
89529@@ -135,7 +135,7 @@ struct extcon_dev {
89530 /* /sys/class/extcon/.../mutually_exclusive/... */
89531 struct attribute_group attr_g_muex;
89532 struct attribute **attrs_muex;
89533- struct device_attribute *d_attrs_muex;
89534+ device_attribute_no_const *d_attrs_muex;
89535 };
89536
89537 /**
89538diff --git a/include/linux/fb.h b/include/linux/fb.h
89539index 043f328..180ccbf 100644
89540--- a/include/linux/fb.h
89541+++ b/include/linux/fb.h
89542@@ -305,7 +305,8 @@ struct fb_ops {
89543 /* called at KDB enter and leave time to prepare the console */
89544 int (*fb_debug_enter)(struct fb_info *info);
89545 int (*fb_debug_leave)(struct fb_info *info);
89546-};
89547+} __do_const;
89548+typedef struct fb_ops __no_const fb_ops_no_const;
89549
89550 #ifdef CONFIG_FB_TILEBLITTING
89551 #define FB_TILE_CURSOR_NONE 0
89552diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
89553index 230f87b..1fd0485 100644
89554--- a/include/linux/fdtable.h
89555+++ b/include/linux/fdtable.h
89556@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
89557 void put_files_struct(struct files_struct *fs);
89558 void reset_files_struct(struct files_struct *);
89559 int unshare_files(struct files_struct **);
89560-struct files_struct *dup_fd(struct files_struct *, int *);
89561+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
89562 void do_close_on_exec(struct files_struct *);
89563 int iterate_fd(struct files_struct *, unsigned,
89564 int (*)(const void *, struct file *, unsigned),
89565diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
89566index 8293262..2b3b8bd 100644
89567--- a/include/linux/frontswap.h
89568+++ b/include/linux/frontswap.h
89569@@ -11,7 +11,7 @@ struct frontswap_ops {
89570 int (*load)(unsigned, pgoff_t, struct page *);
89571 void (*invalidate_page)(unsigned, pgoff_t);
89572 void (*invalidate_area)(unsigned);
89573-};
89574+} __no_const;
89575
89576 extern bool frontswap_enabled;
89577 extern struct frontswap_ops *
89578diff --git a/include/linux/fs.h b/include/linux/fs.h
89579index 52cc449..31f35cb 100644
89580--- a/include/linux/fs.h
89581+++ b/include/linux/fs.h
89582@@ -410,7 +410,7 @@ struct address_space {
89583 spinlock_t private_lock; /* for use by the address_space */
89584 struct list_head private_list; /* ditto */
89585 void *private_data; /* ditto */
89586-} __attribute__((aligned(sizeof(long))));
89587+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
89588 /*
89589 * On most architectures that alignment is already the case; but
89590 * must be enforced here for CRIS, to let the least significant bit
89591@@ -453,7 +453,7 @@ struct block_device {
89592 int bd_fsfreeze_count;
89593 /* Mutex for freeze */
89594 struct mutex bd_fsfreeze_mutex;
89595-};
89596+} __randomize_layout;
89597
89598 /*
89599 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
89600@@ -639,7 +639,7 @@ struct inode {
89601 #endif
89602
89603 void *i_private; /* fs or device private pointer */
89604-};
89605+} __randomize_layout;
89606
89607 static inline int inode_unhashed(struct inode *inode)
89608 {
89609@@ -834,7 +834,7 @@ struct file {
89610 struct list_head f_tfile_llink;
89611 #endif /* #ifdef CONFIG_EPOLL */
89612 struct address_space *f_mapping;
89613-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
89614+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
89615
89616 struct file_handle {
89617 __u32 handle_bytes;
89618@@ -962,7 +962,7 @@ struct file_lock {
89619 int state; /* state of grant or error if -ve */
89620 } afs;
89621 } fl_u;
89622-};
89623+} __randomize_layout;
89624
89625 struct file_lock_context {
89626 spinlock_t flc_lock;
89627@@ -1316,7 +1316,7 @@ struct super_block {
89628 * Indicates how deep in a filesystem stack this SB is
89629 */
89630 int s_stack_depth;
89631-};
89632+} __randomize_layout;
89633
89634 extern struct timespec current_fs_time(struct super_block *sb);
89635
89636@@ -1570,7 +1570,8 @@ struct file_operations {
89637 #ifndef CONFIG_MMU
89638 unsigned (*mmap_capabilities)(struct file *);
89639 #endif
89640-};
89641+} __do_const __randomize_layout;
89642+typedef struct file_operations __no_const file_operations_no_const;
89643
89644 struct inode_operations {
89645 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
89646@@ -2269,7 +2270,7 @@ extern int register_chrdev_region(dev_t, unsigned, const char *);
89647 extern int __register_chrdev(unsigned int major, unsigned int baseminor,
89648 unsigned int count, const char *name,
89649 const struct file_operations *fops);
89650-extern void __unregister_chrdev(unsigned int major, unsigned int baseminor,
89651+extern __nocapture(4) void __unregister_chrdev(unsigned int major, unsigned int baseminor,
89652 unsigned int count, const char *name);
89653 extern void unregister_chrdev_region(dev_t, unsigned);
89654 extern void chrdev_show(struct seq_file *,off_t);
89655@@ -2918,4 +2919,14 @@ static inline bool dir_relax(struct inode *inode)
89656 return !IS_DEADDIR(inode);
89657 }
89658
89659+static inline bool is_sidechannel_device(const struct inode *inode)
89660+{
89661+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
89662+ umode_t mode = inode->i_mode;
89663+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
89664+#else
89665+ return false;
89666+#endif
89667+}
89668+
89669 #endif /* _LINUX_FS_H */
89670diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
89671index 0efc3e6..fd23610 100644
89672--- a/include/linux/fs_struct.h
89673+++ b/include/linux/fs_struct.h
89674@@ -6,13 +6,13 @@
89675 #include <linux/seqlock.h>
89676
89677 struct fs_struct {
89678- int users;
89679+ atomic_t users;
89680 spinlock_t lock;
89681 seqcount_t seq;
89682 int umask;
89683 int in_exec;
89684 struct path root, pwd;
89685-};
89686+} __randomize_layout;
89687
89688 extern struct kmem_cache *fs_cachep;
89689
89690diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
89691index 7714849..a4a5c7a 100644
89692--- a/include/linux/fscache-cache.h
89693+++ b/include/linux/fscache-cache.h
89694@@ -113,7 +113,7 @@ struct fscache_operation {
89695 fscache_operation_release_t release;
89696 };
89697
89698-extern atomic_t fscache_op_debug_id;
89699+extern atomic_unchecked_t fscache_op_debug_id;
89700 extern void fscache_op_work_func(struct work_struct *work);
89701
89702 extern void fscache_enqueue_operation(struct fscache_operation *);
89703@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
89704 INIT_WORK(&op->work, fscache_op_work_func);
89705 atomic_set(&op->usage, 1);
89706 op->state = FSCACHE_OP_ST_INITIALISED;
89707- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
89708+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
89709 op->processor = processor;
89710 op->release = release;
89711 INIT_LIST_HEAD(&op->pend_link);
89712diff --git a/include/linux/fscache.h b/include/linux/fscache.h
89713index 115bb81..e7b812b 100644
89714--- a/include/linux/fscache.h
89715+++ b/include/linux/fscache.h
89716@@ -152,7 +152,7 @@ struct fscache_cookie_def {
89717 * - this is mandatory for any object that may have data
89718 */
89719 void (*now_uncached)(void *cookie_netfs_data);
89720-};
89721+} __do_const;
89722
89723 /*
89724 * fscache cached network filesystem type
89725diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
89726index 7ee1774..72505b8 100644
89727--- a/include/linux/fsnotify.h
89728+++ b/include/linux/fsnotify.h
89729@@ -197,6 +197,9 @@ static inline void fsnotify_access(struct file *file)
89730 struct inode *inode = file_inode(file);
89731 __u32 mask = FS_ACCESS;
89732
89733+ if (is_sidechannel_device(inode))
89734+ return;
89735+
89736 if (S_ISDIR(inode->i_mode))
89737 mask |= FS_ISDIR;
89738
89739@@ -215,6 +218,9 @@ static inline void fsnotify_modify(struct file *file)
89740 struct inode *inode = file_inode(file);
89741 __u32 mask = FS_MODIFY;
89742
89743+ if (is_sidechannel_device(inode))
89744+ return;
89745+
89746 if (S_ISDIR(inode->i_mode))
89747 mask |= FS_ISDIR;
89748
89749@@ -317,7 +323,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
89750 */
89751 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
89752 {
89753- return kstrdup(name, GFP_KERNEL);
89754+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
89755 }
89756
89757 /*
89758diff --git a/include/linux/genhd.h b/include/linux/genhd.h
89759index ec274e0..e678159 100644
89760--- a/include/linux/genhd.h
89761+++ b/include/linux/genhd.h
89762@@ -194,7 +194,7 @@ struct gendisk {
89763 struct kobject *slave_dir;
89764
89765 struct timer_rand_state *random;
89766- atomic_t sync_io; /* RAID */
89767+ atomic_unchecked_t sync_io; /* RAID */
89768 struct disk_events *ev;
89769 #ifdef CONFIG_BLK_DEV_INTEGRITY
89770 struct blk_integrity *integrity;
89771@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
89772 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
89773
89774 /* drivers/char/random.c */
89775-extern void add_disk_randomness(struct gendisk *disk);
89776+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
89777 extern void rand_initialize_disk(struct gendisk *disk);
89778
89779 static inline sector_t get_start_sect(struct block_device *bdev)
89780diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
89781index 667c311..abac2a7 100644
89782--- a/include/linux/genl_magic_func.h
89783+++ b/include/linux/genl_magic_func.h
89784@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
89785 },
89786
89787 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
89788-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
89789+static struct genl_ops ZZZ_genl_ops[] = {
89790 #include GENL_MAGIC_INCLUDE_FILE
89791 };
89792
89793diff --git a/include/linux/gfp.h b/include/linux/gfp.h
89794index eb6fafe..9360779 100644
89795--- a/include/linux/gfp.h
89796+++ b/include/linux/gfp.h
89797@@ -35,6 +35,13 @@ struct vm_area_struct;
89798 #define ___GFP_NO_KSWAPD 0x400000u
89799 #define ___GFP_OTHER_NODE 0x800000u
89800 #define ___GFP_WRITE 0x1000000u
89801+
89802+#ifdef CONFIG_PAX_USERCOPY_SLABS
89803+#define ___GFP_USERCOPY 0x2000000u
89804+#else
89805+#define ___GFP_USERCOPY 0
89806+#endif
89807+
89808 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
89809
89810 /*
89811@@ -92,6 +99,7 @@ struct vm_area_struct;
89812 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
89813 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
89814 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
89815+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
89816
89817 /*
89818 * This may seem redundant, but it's a way of annotating false positives vs.
89819@@ -99,7 +107,7 @@ struct vm_area_struct;
89820 */
89821 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
89822
89823-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
89824+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
89825 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
89826
89827 /* This equals 0, but use constants in case they ever change */
89828@@ -154,6 +162,8 @@ struct vm_area_struct;
89829 /* 4GB DMA on some platforms */
89830 #define GFP_DMA32 __GFP_DMA32
89831
89832+#define GFP_USERCOPY __GFP_USERCOPY
89833+
89834 /* Convert GFP flags to their corresponding migrate type */
89835 static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
89836 {
89837diff --git a/include/linux/gracl.h b/include/linux/gracl.h
89838new file mode 100644
89839index 0000000..91858e4
89840--- /dev/null
89841+++ b/include/linux/gracl.h
89842@@ -0,0 +1,342 @@
89843+#ifndef GR_ACL_H
89844+#define GR_ACL_H
89845+
89846+#include <linux/grdefs.h>
89847+#include <linux/resource.h>
89848+#include <linux/capability.h>
89849+#include <linux/dcache.h>
89850+#include <asm/resource.h>
89851+
89852+/* Major status information */
89853+
89854+#define GR_VERSION "grsecurity 3.1"
89855+#define GRSECURITY_VERSION 0x3100
89856+
89857+enum {
89858+ GR_SHUTDOWN = 0,
89859+ GR_ENABLE = 1,
89860+ GR_SPROLE = 2,
89861+ GR_OLDRELOAD = 3,
89862+ GR_SEGVMOD = 4,
89863+ GR_STATUS = 5,
89864+ GR_UNSPROLE = 6,
89865+ GR_PASSSET = 7,
89866+ GR_SPROLEPAM = 8,
89867+ GR_RELOAD = 9,
89868+};
89869+
89870+/* Password setup definitions
89871+ * kernel/grhash.c */
89872+enum {
89873+ GR_PW_LEN = 128,
89874+ GR_SALT_LEN = 16,
89875+ GR_SHA_LEN = 32,
89876+};
89877+
89878+enum {
89879+ GR_SPROLE_LEN = 64,
89880+};
89881+
89882+enum {
89883+ GR_NO_GLOB = 0,
89884+ GR_REG_GLOB,
89885+ GR_CREATE_GLOB
89886+};
89887+
89888+#define GR_NLIMITS 32
89889+
89890+/* Begin Data Structures */
89891+
89892+struct sprole_pw {
89893+ unsigned char *rolename;
89894+ unsigned char salt[GR_SALT_LEN];
89895+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
89896+};
89897+
89898+struct name_entry {
89899+ __u32 key;
89900+ u64 inode;
89901+ dev_t device;
89902+ char *name;
89903+ __u16 len;
89904+ __u8 deleted;
89905+ struct name_entry *prev;
89906+ struct name_entry *next;
89907+};
89908+
89909+struct inodev_entry {
89910+ struct name_entry *nentry;
89911+ struct inodev_entry *prev;
89912+ struct inodev_entry *next;
89913+};
89914+
89915+struct acl_role_db {
89916+ struct acl_role_label **r_hash;
89917+ __u32 r_size;
89918+};
89919+
89920+struct inodev_db {
89921+ struct inodev_entry **i_hash;
89922+ __u32 i_size;
89923+};
89924+
89925+struct name_db {
89926+ struct name_entry **n_hash;
89927+ __u32 n_size;
89928+};
89929+
89930+struct crash_uid {
89931+ uid_t uid;
89932+ unsigned long expires;
89933+};
89934+
89935+struct gr_hash_struct {
89936+ void **table;
89937+ void **nametable;
89938+ void *first;
89939+ __u32 table_size;
89940+ __u32 used_size;
89941+ int type;
89942+};
89943+
89944+/* Userspace Grsecurity ACL data structures */
89945+
89946+struct acl_subject_label {
89947+ char *filename;
89948+ u64 inode;
89949+ dev_t device;
89950+ __u32 mode;
89951+ kernel_cap_t cap_mask;
89952+ kernel_cap_t cap_lower;
89953+ kernel_cap_t cap_invert_audit;
89954+
89955+ struct rlimit res[GR_NLIMITS];
89956+ __u32 resmask;
89957+
89958+ __u8 user_trans_type;
89959+ __u8 group_trans_type;
89960+ uid_t *user_transitions;
89961+ gid_t *group_transitions;
89962+ __u16 user_trans_num;
89963+ __u16 group_trans_num;
89964+
89965+ __u32 sock_families[2];
89966+ __u32 ip_proto[8];
89967+ __u32 ip_type;
89968+ struct acl_ip_label **ips;
89969+ __u32 ip_num;
89970+ __u32 inaddr_any_override;
89971+
89972+ __u32 crashes;
89973+ unsigned long expires;
89974+
89975+ struct acl_subject_label *parent_subject;
89976+ struct gr_hash_struct *hash;
89977+ struct acl_subject_label *prev;
89978+ struct acl_subject_label *next;
89979+
89980+ struct acl_object_label **obj_hash;
89981+ __u32 obj_hash_size;
89982+ __u16 pax_flags;
89983+};
89984+
89985+struct role_allowed_ip {
89986+ __u32 addr;
89987+ __u32 netmask;
89988+
89989+ struct role_allowed_ip *prev;
89990+ struct role_allowed_ip *next;
89991+};
89992+
89993+struct role_transition {
89994+ char *rolename;
89995+
89996+ struct role_transition *prev;
89997+ struct role_transition *next;
89998+};
89999+
90000+struct acl_role_label {
90001+ char *rolename;
90002+ uid_t uidgid;
90003+ __u16 roletype;
90004+
90005+ __u16 auth_attempts;
90006+ unsigned long expires;
90007+
90008+ struct acl_subject_label *root_label;
90009+ struct gr_hash_struct *hash;
90010+
90011+ struct acl_role_label *prev;
90012+ struct acl_role_label *next;
90013+
90014+ struct role_transition *transitions;
90015+ struct role_allowed_ip *allowed_ips;
90016+ uid_t *domain_children;
90017+ __u16 domain_child_num;
90018+
90019+ umode_t umask;
90020+
90021+ struct acl_subject_label **subj_hash;
90022+ __u32 subj_hash_size;
90023+};
90024+
90025+struct user_acl_role_db {
90026+ struct acl_role_label **r_table;
90027+ __u32 num_pointers; /* Number of allocations to track */
90028+ __u32 num_roles; /* Number of roles */
90029+ __u32 num_domain_children; /* Number of domain children */
90030+ __u32 num_subjects; /* Number of subjects */
90031+ __u32 num_objects; /* Number of objects */
90032+};
90033+
90034+struct acl_object_label {
90035+ char *filename;
90036+ u64 inode;
90037+ dev_t device;
90038+ __u32 mode;
90039+
90040+ struct acl_subject_label *nested;
90041+ struct acl_object_label *globbed;
90042+
90043+ /* next two structures not used */
90044+
90045+ struct acl_object_label *prev;
90046+ struct acl_object_label *next;
90047+};
90048+
90049+struct acl_ip_label {
90050+ char *iface;
90051+ __u32 addr;
90052+ __u32 netmask;
90053+ __u16 low, high;
90054+ __u8 mode;
90055+ __u32 type;
90056+ __u32 proto[8];
90057+
90058+ /* next two structures not used */
90059+
90060+ struct acl_ip_label *prev;
90061+ struct acl_ip_label *next;
90062+};
90063+
90064+struct gr_arg {
90065+ struct user_acl_role_db role_db;
90066+ unsigned char pw[GR_PW_LEN];
90067+ unsigned char salt[GR_SALT_LEN];
90068+ unsigned char sum[GR_SHA_LEN];
90069+ unsigned char sp_role[GR_SPROLE_LEN];
90070+ struct sprole_pw *sprole_pws;
90071+ dev_t segv_device;
90072+ u64 segv_inode;
90073+ uid_t segv_uid;
90074+ __u16 num_sprole_pws;
90075+ __u16 mode;
90076+};
90077+
90078+struct gr_arg_wrapper {
90079+ struct gr_arg *arg;
90080+ __u32 version;
90081+ __u32 size;
90082+};
90083+
90084+struct subject_map {
90085+ struct acl_subject_label *user;
90086+ struct acl_subject_label *kernel;
90087+ struct subject_map *prev;
90088+ struct subject_map *next;
90089+};
90090+
90091+struct acl_subj_map_db {
90092+ struct subject_map **s_hash;
90093+ __u32 s_size;
90094+};
90095+
90096+struct gr_policy_state {
90097+ struct sprole_pw **acl_special_roles;
90098+ __u16 num_sprole_pws;
90099+ struct acl_role_label *kernel_role;
90100+ struct acl_role_label *role_list;
90101+ struct acl_role_label *default_role;
90102+ struct acl_role_db acl_role_set;
90103+ struct acl_subj_map_db subj_map_set;
90104+ struct name_db name_set;
90105+ struct inodev_db inodev_set;
90106+};
90107+
90108+struct gr_alloc_state {
90109+ unsigned long alloc_stack_next;
90110+ unsigned long alloc_stack_size;
90111+ void **alloc_stack;
90112+};
90113+
90114+struct gr_reload_state {
90115+ struct gr_policy_state oldpolicy;
90116+ struct gr_alloc_state oldalloc;
90117+ struct gr_policy_state newpolicy;
90118+ struct gr_alloc_state newalloc;
90119+ struct gr_policy_state *oldpolicy_ptr;
90120+ struct gr_alloc_state *oldalloc_ptr;
90121+ unsigned char oldmode;
90122+};
90123+
90124+/* End Data Structures Section */
90125+
90126+/* Hash functions generated by empirical testing by Brad Spengler
90127+ Makes good use of the low bits of the inode. Generally 0-1 times
90128+ in loop for successful match. 0-3 for unsuccessful match.
90129+ Shift/add algorithm with modulus of table size and an XOR*/
90130+
90131+static __inline__ unsigned int
90132+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
90133+{
90134+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
90135+}
90136+
90137+ static __inline__ unsigned int
90138+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
90139+{
90140+ return ((const unsigned long)userp % sz);
90141+}
90142+
90143+static __inline__ unsigned int
90144+gr_fhash(const u64 ino, const dev_t dev, const unsigned int sz)
90145+{
90146+ unsigned int rem;
90147+ div_u64_rem((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9)), sz, &rem);
90148+ return rem;
90149+}
90150+
90151+static __inline__ unsigned int
90152+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
90153+{
90154+ return full_name_hash((const unsigned char *)name, len) % sz;
90155+}
90156+
90157+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
90158+ subj = NULL; \
90159+ iter = 0; \
90160+ while (iter < role->subj_hash_size) { \
90161+ if (subj == NULL) \
90162+ subj = role->subj_hash[iter]; \
90163+ if (subj == NULL) { \
90164+ iter++; \
90165+ continue; \
90166+ }
90167+
90168+#define FOR_EACH_SUBJECT_END(subj,iter) \
90169+ subj = subj->next; \
90170+ if (subj == NULL) \
90171+ iter++; \
90172+ }
90173+
90174+
90175+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
90176+ subj = role->hash->first; \
90177+ while (subj != NULL) {
90178+
90179+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
90180+ subj = subj->next; \
90181+ }
90182+
90183+#endif
90184+
90185diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
90186new file mode 100644
90187index 0000000..af64092
90188--- /dev/null
90189+++ b/include/linux/gracl_compat.h
90190@@ -0,0 +1,156 @@
90191+#ifndef GR_ACL_COMPAT_H
90192+#define GR_ACL_COMPAT_H
90193+
90194+#include <linux/resource.h>
90195+#include <asm/resource.h>
90196+
90197+struct sprole_pw_compat {
90198+ compat_uptr_t rolename;
90199+ unsigned char salt[GR_SALT_LEN];
90200+ unsigned char sum[GR_SHA_LEN];
90201+};
90202+
90203+struct gr_hash_struct_compat {
90204+ compat_uptr_t table;
90205+ compat_uptr_t nametable;
90206+ compat_uptr_t first;
90207+ __u32 table_size;
90208+ __u32 used_size;
90209+ int type;
90210+};
90211+
90212+struct acl_subject_label_compat {
90213+ compat_uptr_t filename;
90214+ compat_u64 inode;
90215+ __u32 device;
90216+ __u32 mode;
90217+ kernel_cap_t cap_mask;
90218+ kernel_cap_t cap_lower;
90219+ kernel_cap_t cap_invert_audit;
90220+
90221+ struct compat_rlimit res[GR_NLIMITS];
90222+ __u32 resmask;
90223+
90224+ __u8 user_trans_type;
90225+ __u8 group_trans_type;
90226+ compat_uptr_t user_transitions;
90227+ compat_uptr_t group_transitions;
90228+ __u16 user_trans_num;
90229+ __u16 group_trans_num;
90230+
90231+ __u32 sock_families[2];
90232+ __u32 ip_proto[8];
90233+ __u32 ip_type;
90234+ compat_uptr_t ips;
90235+ __u32 ip_num;
90236+ __u32 inaddr_any_override;
90237+
90238+ __u32 crashes;
90239+ compat_ulong_t expires;
90240+
90241+ compat_uptr_t parent_subject;
90242+ compat_uptr_t hash;
90243+ compat_uptr_t prev;
90244+ compat_uptr_t next;
90245+
90246+ compat_uptr_t obj_hash;
90247+ __u32 obj_hash_size;
90248+ __u16 pax_flags;
90249+};
90250+
90251+struct role_allowed_ip_compat {
90252+ __u32 addr;
90253+ __u32 netmask;
90254+
90255+ compat_uptr_t prev;
90256+ compat_uptr_t next;
90257+};
90258+
90259+struct role_transition_compat {
90260+ compat_uptr_t rolename;
90261+
90262+ compat_uptr_t prev;
90263+ compat_uptr_t next;
90264+};
90265+
90266+struct acl_role_label_compat {
90267+ compat_uptr_t rolename;
90268+ uid_t uidgid;
90269+ __u16 roletype;
90270+
90271+ __u16 auth_attempts;
90272+ compat_ulong_t expires;
90273+
90274+ compat_uptr_t root_label;
90275+ compat_uptr_t hash;
90276+
90277+ compat_uptr_t prev;
90278+ compat_uptr_t next;
90279+
90280+ compat_uptr_t transitions;
90281+ compat_uptr_t allowed_ips;
90282+ compat_uptr_t domain_children;
90283+ __u16 domain_child_num;
90284+
90285+ umode_t umask;
90286+
90287+ compat_uptr_t subj_hash;
90288+ __u32 subj_hash_size;
90289+};
90290+
90291+struct user_acl_role_db_compat {
90292+ compat_uptr_t r_table;
90293+ __u32 num_pointers;
90294+ __u32 num_roles;
90295+ __u32 num_domain_children;
90296+ __u32 num_subjects;
90297+ __u32 num_objects;
90298+};
90299+
90300+struct acl_object_label_compat {
90301+ compat_uptr_t filename;
90302+ compat_u64 inode;
90303+ __u32 device;
90304+ __u32 mode;
90305+
90306+ compat_uptr_t nested;
90307+ compat_uptr_t globbed;
90308+
90309+ compat_uptr_t prev;
90310+ compat_uptr_t next;
90311+};
90312+
90313+struct acl_ip_label_compat {
90314+ compat_uptr_t iface;
90315+ __u32 addr;
90316+ __u32 netmask;
90317+ __u16 low, high;
90318+ __u8 mode;
90319+ __u32 type;
90320+ __u32 proto[8];
90321+
90322+ compat_uptr_t prev;
90323+ compat_uptr_t next;
90324+};
90325+
90326+struct gr_arg_compat {
90327+ struct user_acl_role_db_compat role_db;
90328+ unsigned char pw[GR_PW_LEN];
90329+ unsigned char salt[GR_SALT_LEN];
90330+ unsigned char sum[GR_SHA_LEN];
90331+ unsigned char sp_role[GR_SPROLE_LEN];
90332+ compat_uptr_t sprole_pws;
90333+ __u32 segv_device;
90334+ compat_u64 segv_inode;
90335+ uid_t segv_uid;
90336+ __u16 num_sprole_pws;
90337+ __u16 mode;
90338+};
90339+
90340+struct gr_arg_wrapper_compat {
90341+ compat_uptr_t arg;
90342+ __u32 version;
90343+ __u32 size;
90344+};
90345+
90346+#endif
90347diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
90348new file mode 100644
90349index 0000000..323ecf2
90350--- /dev/null
90351+++ b/include/linux/gralloc.h
90352@@ -0,0 +1,9 @@
90353+#ifndef __GRALLOC_H
90354+#define __GRALLOC_H
90355+
90356+void acl_free_all(void);
90357+int acl_alloc_stack_init(unsigned long size);
90358+void *acl_alloc(unsigned long len);
90359+void *acl_alloc_num(unsigned long num, unsigned long len);
90360+
90361+#endif
90362diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
90363new file mode 100644
90364index 0000000..be66033
90365--- /dev/null
90366+++ b/include/linux/grdefs.h
90367@@ -0,0 +1,140 @@
90368+#ifndef GRDEFS_H
90369+#define GRDEFS_H
90370+
90371+/* Begin grsecurity status declarations */
90372+
90373+enum {
90374+ GR_READY = 0x01,
90375+ GR_STATUS_INIT = 0x00 // disabled state
90376+};
90377+
90378+/* Begin ACL declarations */
90379+
90380+/* Role flags */
90381+
90382+enum {
90383+ GR_ROLE_USER = 0x0001,
90384+ GR_ROLE_GROUP = 0x0002,
90385+ GR_ROLE_DEFAULT = 0x0004,
90386+ GR_ROLE_SPECIAL = 0x0008,
90387+ GR_ROLE_AUTH = 0x0010,
90388+ GR_ROLE_NOPW = 0x0020,
90389+ GR_ROLE_GOD = 0x0040,
90390+ GR_ROLE_LEARN = 0x0080,
90391+ GR_ROLE_TPE = 0x0100,
90392+ GR_ROLE_DOMAIN = 0x0200,
90393+ GR_ROLE_PAM = 0x0400,
90394+ GR_ROLE_PERSIST = 0x0800
90395+};
90396+
90397+/* ACL Subject and Object mode flags */
90398+enum {
90399+ GR_DELETED = 0x80000000
90400+};
90401+
90402+/* ACL Object-only mode flags */
90403+enum {
90404+ GR_READ = 0x00000001,
90405+ GR_APPEND = 0x00000002,
90406+ GR_WRITE = 0x00000004,
90407+ GR_EXEC = 0x00000008,
90408+ GR_FIND = 0x00000010,
90409+ GR_INHERIT = 0x00000020,
90410+ GR_SETID = 0x00000040,
90411+ GR_CREATE = 0x00000080,
90412+ GR_DELETE = 0x00000100,
90413+ GR_LINK = 0x00000200,
90414+ GR_AUDIT_READ = 0x00000400,
90415+ GR_AUDIT_APPEND = 0x00000800,
90416+ GR_AUDIT_WRITE = 0x00001000,
90417+ GR_AUDIT_EXEC = 0x00002000,
90418+ GR_AUDIT_FIND = 0x00004000,
90419+ GR_AUDIT_INHERIT= 0x00008000,
90420+ GR_AUDIT_SETID = 0x00010000,
90421+ GR_AUDIT_CREATE = 0x00020000,
90422+ GR_AUDIT_DELETE = 0x00040000,
90423+ GR_AUDIT_LINK = 0x00080000,
90424+ GR_PTRACERD = 0x00100000,
90425+ GR_NOPTRACE = 0x00200000,
90426+ GR_SUPPRESS = 0x00400000,
90427+ GR_NOLEARN = 0x00800000,
90428+ GR_INIT_TRANSFER= 0x01000000
90429+};
90430+
90431+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
90432+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
90433+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
90434+
90435+/* ACL subject-only mode flags */
90436+enum {
90437+ GR_KILL = 0x00000001,
90438+ GR_VIEW = 0x00000002,
90439+ GR_PROTECTED = 0x00000004,
90440+ GR_LEARN = 0x00000008,
90441+ GR_OVERRIDE = 0x00000010,
90442+ /* just a placeholder, this mode is only used in userspace */
90443+ GR_DUMMY = 0x00000020,
90444+ GR_PROTSHM = 0x00000040,
90445+ GR_KILLPROC = 0x00000080,
90446+ GR_KILLIPPROC = 0x00000100,
90447+ /* just a placeholder, this mode is only used in userspace */
90448+ GR_NOTROJAN = 0x00000200,
90449+ GR_PROTPROCFD = 0x00000400,
90450+ GR_PROCACCT = 0x00000800,
90451+ GR_RELAXPTRACE = 0x00001000,
90452+ //GR_NESTED = 0x00002000,
90453+ GR_INHERITLEARN = 0x00004000,
90454+ GR_PROCFIND = 0x00008000,
90455+ GR_POVERRIDE = 0x00010000,
90456+ GR_KERNELAUTH = 0x00020000,
90457+ GR_ATSECURE = 0x00040000,
90458+ GR_SHMEXEC = 0x00080000
90459+};
90460+
90461+enum {
90462+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
90463+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
90464+ GR_PAX_ENABLE_MPROTECT = 0x0004,
90465+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
90466+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
90467+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
90468+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
90469+ GR_PAX_DISABLE_MPROTECT = 0x0400,
90470+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
90471+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
90472+};
90473+
90474+enum {
90475+ GR_ID_USER = 0x01,
90476+ GR_ID_GROUP = 0x02,
90477+};
90478+
90479+enum {
90480+ GR_ID_ALLOW = 0x01,
90481+ GR_ID_DENY = 0x02,
90482+};
90483+
90484+#define GR_CRASH_RES 31
90485+#define GR_UIDTABLE_MAX 500
90486+
90487+/* begin resource learning section */
90488+enum {
90489+ GR_RLIM_CPU_BUMP = 60,
90490+ GR_RLIM_FSIZE_BUMP = 50000,
90491+ GR_RLIM_DATA_BUMP = 10000,
90492+ GR_RLIM_STACK_BUMP = 1000,
90493+ GR_RLIM_CORE_BUMP = 10000,
90494+ GR_RLIM_RSS_BUMP = 500000,
90495+ GR_RLIM_NPROC_BUMP = 1,
90496+ GR_RLIM_NOFILE_BUMP = 5,
90497+ GR_RLIM_MEMLOCK_BUMP = 50000,
90498+ GR_RLIM_AS_BUMP = 500000,
90499+ GR_RLIM_LOCKS_BUMP = 2,
90500+ GR_RLIM_SIGPENDING_BUMP = 5,
90501+ GR_RLIM_MSGQUEUE_BUMP = 10000,
90502+ GR_RLIM_NICE_BUMP = 1,
90503+ GR_RLIM_RTPRIO_BUMP = 1,
90504+ GR_RLIM_RTTIME_BUMP = 1000000
90505+};
90506+
90507+#endif
90508diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
90509new file mode 100644
90510index 0000000..fb1de5d
90511--- /dev/null
90512+++ b/include/linux/grinternal.h
90513@@ -0,0 +1,230 @@
90514+#ifndef __GRINTERNAL_H
90515+#define __GRINTERNAL_H
90516+
90517+#ifdef CONFIG_GRKERNSEC
90518+
90519+#include <linux/fs.h>
90520+#include <linux/mnt_namespace.h>
90521+#include <linux/nsproxy.h>
90522+#include <linux/gracl.h>
90523+#include <linux/grdefs.h>
90524+#include <linux/grmsg.h>
90525+
90526+void gr_add_learn_entry(const char *fmt, ...)
90527+ __attribute__ ((format (printf, 1, 2)));
90528+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
90529+ const struct vfsmount *mnt);
90530+__u32 gr_check_create(const struct dentry *new_dentry,
90531+ const struct dentry *parent,
90532+ const struct vfsmount *mnt, const __u32 mode);
90533+int gr_check_protected_task(const struct task_struct *task);
90534+__u32 to_gr_audit(const __u32 reqmode);
90535+int gr_set_acls(const int type);
90536+int gr_acl_is_enabled(void);
90537+char gr_roletype_to_char(void);
90538+
90539+void gr_handle_alertkill(struct task_struct *task);
90540+char *gr_to_filename(const struct dentry *dentry,
90541+ const struct vfsmount *mnt);
90542+char *gr_to_filename1(const struct dentry *dentry,
90543+ const struct vfsmount *mnt);
90544+char *gr_to_filename2(const struct dentry *dentry,
90545+ const struct vfsmount *mnt);
90546+char *gr_to_filename3(const struct dentry *dentry,
90547+ const struct vfsmount *mnt);
90548+
90549+extern int grsec_enable_ptrace_readexec;
90550+extern int grsec_enable_harden_ptrace;
90551+extern int grsec_enable_link;
90552+extern int grsec_enable_fifo;
90553+extern int grsec_enable_execve;
90554+extern int grsec_enable_shm;
90555+extern int grsec_enable_execlog;
90556+extern int grsec_enable_signal;
90557+extern int grsec_enable_audit_ptrace;
90558+extern int grsec_enable_forkfail;
90559+extern int grsec_enable_time;
90560+extern int grsec_enable_rofs;
90561+extern int grsec_deny_new_usb;
90562+extern int grsec_enable_chroot_shmat;
90563+extern int grsec_enable_chroot_mount;
90564+extern int grsec_enable_chroot_double;
90565+extern int grsec_enable_chroot_pivot;
90566+extern int grsec_enable_chroot_chdir;
90567+extern int grsec_enable_chroot_chmod;
90568+extern int grsec_enable_chroot_mknod;
90569+extern int grsec_enable_chroot_fchdir;
90570+extern int grsec_enable_chroot_nice;
90571+extern int grsec_enable_chroot_execlog;
90572+extern int grsec_enable_chroot_caps;
90573+extern int grsec_enable_chroot_rename;
90574+extern int grsec_enable_chroot_sysctl;
90575+extern int grsec_enable_chroot_unix;
90576+extern int grsec_enable_symlinkown;
90577+extern kgid_t grsec_symlinkown_gid;
90578+extern int grsec_enable_tpe;
90579+extern kgid_t grsec_tpe_gid;
90580+extern int grsec_enable_tpe_all;
90581+extern int grsec_enable_tpe_invert;
90582+extern int grsec_enable_socket_all;
90583+extern kgid_t grsec_socket_all_gid;
90584+extern int grsec_enable_socket_client;
90585+extern kgid_t grsec_socket_client_gid;
90586+extern int grsec_enable_socket_server;
90587+extern kgid_t grsec_socket_server_gid;
90588+extern kgid_t grsec_audit_gid;
90589+extern int grsec_enable_group;
90590+extern int grsec_enable_log_rwxmaps;
90591+extern int grsec_enable_mount;
90592+extern int grsec_enable_chdir;
90593+extern int grsec_resource_logging;
90594+extern int grsec_enable_blackhole;
90595+extern int grsec_lastack_retries;
90596+extern int grsec_enable_brute;
90597+extern int grsec_enable_harden_ipc;
90598+extern int grsec_lock;
90599+
90600+extern spinlock_t grsec_alert_lock;
90601+extern unsigned long grsec_alert_wtime;
90602+extern unsigned long grsec_alert_fyet;
90603+
90604+extern spinlock_t grsec_audit_lock;
90605+
90606+extern rwlock_t grsec_exec_file_lock;
90607+
90608+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
90609+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
90610+ (tsk)->exec_file->f_path.mnt) : "/")
90611+
90612+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
90613+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
90614+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
90615+
90616+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
90617+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
90618+ (tsk)->exec_file->f_path.mnt) : "/")
90619+
90620+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
90621+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
90622+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
90623+
90624+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
90625+
90626+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
90627+
90628+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
90629+{
90630+ if (file1 && file2) {
90631+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
90632+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
90633+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
90634+ return true;
90635+ }
90636+
90637+ return false;
90638+}
90639+
90640+#define GR_CHROOT_CAPS {{ \
90641+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
90642+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
90643+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
90644+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
90645+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
90646+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
90647+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
90648+
90649+#define security_learn(normal_msg,args...) \
90650+({ \
90651+ read_lock(&grsec_exec_file_lock); \
90652+ gr_add_learn_entry(normal_msg "\n", ## args); \
90653+ read_unlock(&grsec_exec_file_lock); \
90654+})
90655+
90656+enum {
90657+ GR_DO_AUDIT,
90658+ GR_DONT_AUDIT,
90659+ /* used for non-audit messages that we shouldn't kill the task on */
90660+ GR_DONT_AUDIT_GOOD
90661+};
90662+
90663+enum {
90664+ GR_TTYSNIFF,
90665+ GR_RBAC,
90666+ GR_RBAC_STR,
90667+ GR_STR_RBAC,
90668+ GR_RBAC_MODE2,
90669+ GR_RBAC_MODE3,
90670+ GR_FILENAME,
90671+ GR_SYSCTL_HIDDEN,
90672+ GR_NOARGS,
90673+ GR_ONE_INT,
90674+ GR_ONE_INT_TWO_STR,
90675+ GR_ONE_STR,
90676+ GR_STR_INT,
90677+ GR_TWO_STR_INT,
90678+ GR_TWO_INT,
90679+ GR_TWO_U64,
90680+ GR_THREE_INT,
90681+ GR_FIVE_INT_TWO_STR,
90682+ GR_TWO_STR,
90683+ GR_THREE_STR,
90684+ GR_FOUR_STR,
90685+ GR_STR_FILENAME,
90686+ GR_FILENAME_STR,
90687+ GR_FILENAME_TWO_INT,
90688+ GR_FILENAME_TWO_INT_STR,
90689+ GR_TEXTREL,
90690+ GR_PTRACE,
90691+ GR_RESOURCE,
90692+ GR_CAP,
90693+ GR_SIG,
90694+ GR_SIG2,
90695+ GR_CRASH1,
90696+ GR_CRASH2,
90697+ GR_PSACCT,
90698+ GR_RWXMAP,
90699+ GR_RWXMAPVMA
90700+};
90701+
90702+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
90703+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
90704+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
90705+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
90706+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
90707+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
90708+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
90709+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
90710+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
90711+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
90712+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
90713+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
90714+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
90715+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
90716+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
90717+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
90718+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
90719+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
90720+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
90721+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
90722+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
90723+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
90724+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
90725+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
90726+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
90727+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
90728+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
90729+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
90730+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
90731+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
90732+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
90733+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
90734+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
90735+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
90736+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
90737+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
90738+
90739+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
90740+
90741+#endif
90742+
90743+#endif
90744diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
90745new file mode 100644
90746index 0000000..26ef560
90747--- /dev/null
90748+++ b/include/linux/grmsg.h
90749@@ -0,0 +1,118 @@
90750+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
90751+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
90752+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
90753+#define GR_STOPMOD_MSG "denied modification of module state by "
90754+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
90755+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
90756+#define GR_IOPERM_MSG "denied use of ioperm() by "
90757+#define GR_IOPL_MSG "denied use of iopl() by "
90758+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
90759+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
90760+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
90761+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
90762+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
90763+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
90764+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
90765+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
90766+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
90767+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
90768+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
90769+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
90770+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
90771+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
90772+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
90773+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
90774+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
90775+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
90776+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
90777+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
90778+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
90779+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
90780+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
90781+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
90782+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
90783+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
90784+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
90785+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
90786+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
90787+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
90788+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
90789+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
90790+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
90791+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
90792+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
90793+#define GR_CHROOT_RENAME_MSG "denied bad rename of %.950s out of a chroot by "
90794+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
90795+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
90796+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
90797+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
90798+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
90799+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
90800+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
90801+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
90802+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
90803+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
90804+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
90805+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
90806+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
90807+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
90808+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
90809+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
90810+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
90811+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
90812+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
90813+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
90814+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
90815+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
90816+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
90817+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
90818+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
90819+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
90820+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
90821+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
90822+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
90823+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
90824+#define GR_FAILFORK_MSG "failed fork with errno %s by "
90825+#define GR_NICE_CHROOT_MSG "denied priority change by "
90826+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
90827+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
90828+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
90829+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
90830+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
90831+#define GR_TIME_MSG "time set by "
90832+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
90833+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
90834+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
90835+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
90836+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
90837+#define GR_BIND_MSG "denied bind() by "
90838+#define GR_CONNECT_MSG "denied connect() by "
90839+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
90840+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
90841+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
90842+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
90843+#define GR_CAP_ACL_MSG "use of %s denied for "
90844+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
90845+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
90846+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
90847+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
90848+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
90849+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
90850+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
90851+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
90852+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
90853+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
90854+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
90855+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
90856+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
90857+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
90858+#define GR_VM86_MSG "denied use of vm86 by "
90859+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
90860+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
90861+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
90862+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
90863+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
90864+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
90865+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
90866+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
90867+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
90868diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
90869new file mode 100644
90870index 0000000..63c1850
90871--- /dev/null
90872+++ b/include/linux/grsecurity.h
90873@@ -0,0 +1,250 @@
90874+#ifndef GR_SECURITY_H
90875+#define GR_SECURITY_H
90876+#include <linux/fs.h>
90877+#include <linux/fs_struct.h>
90878+#include <linux/binfmts.h>
90879+#include <linux/gracl.h>
90880+
90881+/* notify of brain-dead configs */
90882+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
90883+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
90884+#endif
90885+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
90886+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
90887+#endif
90888+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
90889+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
90890+#endif
90891+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
90892+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
90893+#endif
90894+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
90895+#error "CONFIG_PAX enabled, but no PaX options are enabled."
90896+#endif
90897+
90898+int gr_handle_new_usb(void);
90899+
90900+void gr_handle_brute_attach(int dumpable);
90901+void gr_handle_brute_check(void);
90902+void gr_handle_kernel_exploit(void);
90903+
90904+char gr_roletype_to_char(void);
90905+
90906+int gr_proc_is_restricted(void);
90907+
90908+int gr_acl_enable_at_secure(void);
90909+
90910+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
90911+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
90912+
90913+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
90914+
90915+void gr_del_task_from_ip_table(struct task_struct *p);
90916+
90917+int gr_pid_is_chrooted(struct task_struct *p);
90918+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
90919+int gr_handle_chroot_nice(void);
90920+int gr_handle_chroot_sysctl(const int op);
90921+int gr_handle_chroot_setpriority(struct task_struct *p,
90922+ const int niceval);
90923+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
90924+int gr_chroot_fhandle(void);
90925+int gr_handle_chroot_chroot(const struct dentry *dentry,
90926+ const struct vfsmount *mnt);
90927+void gr_handle_chroot_chdir(const struct path *path);
90928+int gr_handle_chroot_chmod(const struct dentry *dentry,
90929+ const struct vfsmount *mnt, const int mode);
90930+int gr_handle_chroot_mknod(const struct dentry *dentry,
90931+ const struct vfsmount *mnt, const int mode);
90932+int gr_handle_chroot_mount(const struct dentry *dentry,
90933+ const struct vfsmount *mnt,
90934+ const char *dev_name);
90935+int gr_handle_chroot_pivot(void);
90936+int gr_handle_chroot_unix(const pid_t pid);
90937+
90938+int gr_handle_rawio(const struct inode *inode);
90939+
90940+void gr_handle_ioperm(void);
90941+void gr_handle_iopl(void);
90942+void gr_handle_msr_write(void);
90943+
90944+umode_t gr_acl_umask(void);
90945+
90946+int gr_tpe_allow(const struct file *file);
90947+
90948+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
90949+void gr_clear_chroot_entries(struct task_struct *task);
90950+
90951+void gr_log_forkfail(const int retval);
90952+void gr_log_timechange(void);
90953+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
90954+void gr_log_chdir(const struct dentry *dentry,
90955+ const struct vfsmount *mnt);
90956+void gr_log_chroot_exec(const struct dentry *dentry,
90957+ const struct vfsmount *mnt);
90958+void gr_log_remount(const char *devname, const int retval);
90959+void gr_log_unmount(const char *devname, const int retval);
90960+void gr_log_mount(const char *from, struct path *to, const int retval);
90961+void gr_log_textrel(struct vm_area_struct *vma);
90962+void gr_log_ptgnustack(struct file *file);
90963+void gr_log_rwxmmap(struct file *file);
90964+void gr_log_rwxmprotect(struct vm_area_struct *vma);
90965+
90966+int gr_handle_follow_link(const struct inode *parent,
90967+ const struct inode *inode,
90968+ const struct dentry *dentry,
90969+ const struct vfsmount *mnt);
90970+int gr_handle_fifo(const struct dentry *dentry,
90971+ const struct vfsmount *mnt,
90972+ const struct dentry *dir, const int flag,
90973+ const int acc_mode);
90974+int gr_handle_hardlink(const struct dentry *dentry,
90975+ const struct vfsmount *mnt,
90976+ struct inode *inode,
90977+ const int mode, const struct filename *to);
90978+
90979+int gr_is_capable(const int cap);
90980+int gr_is_capable_nolog(const int cap);
90981+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
90982+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
90983+
90984+void gr_copy_label(struct task_struct *tsk);
90985+void gr_handle_crash(struct task_struct *task, const int sig);
90986+int gr_handle_signal(const struct task_struct *p, const int sig);
90987+int gr_check_crash_uid(const kuid_t uid);
90988+int gr_check_protected_task(const struct task_struct *task);
90989+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
90990+int gr_acl_handle_mmap(const struct file *file,
90991+ const unsigned long prot);
90992+int gr_acl_handle_mprotect(const struct file *file,
90993+ const unsigned long prot);
90994+int gr_check_hidden_task(const struct task_struct *tsk);
90995+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
90996+ const struct vfsmount *mnt);
90997+__u32 gr_acl_handle_utime(const struct dentry *dentry,
90998+ const struct vfsmount *mnt);
90999+__u32 gr_acl_handle_access(const struct dentry *dentry,
91000+ const struct vfsmount *mnt, const int fmode);
91001+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
91002+ const struct vfsmount *mnt, umode_t *mode);
91003+__u32 gr_acl_handle_chown(const struct dentry *dentry,
91004+ const struct vfsmount *mnt);
91005+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
91006+ const struct vfsmount *mnt);
91007+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
91008+ const struct vfsmount *mnt);
91009+int gr_handle_ptrace(struct task_struct *task, const long request);
91010+int gr_handle_proc_ptrace(struct task_struct *task);
91011+__u32 gr_acl_handle_execve(const struct dentry *dentry,
91012+ const struct vfsmount *mnt);
91013+int gr_check_crash_exec(const struct file *filp);
91014+int gr_acl_is_enabled(void);
91015+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
91016+ const kgid_t gid);
91017+int gr_set_proc_label(const struct dentry *dentry,
91018+ const struct vfsmount *mnt,
91019+ const int unsafe_flags);
91020+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
91021+ const struct vfsmount *mnt);
91022+__u32 gr_acl_handle_open(const struct dentry *dentry,
91023+ const struct vfsmount *mnt, int acc_mode);
91024+__u32 gr_acl_handle_creat(const struct dentry *dentry,
91025+ const struct dentry *p_dentry,
91026+ const struct vfsmount *p_mnt,
91027+ int open_flags, int acc_mode, const int imode);
91028+void gr_handle_create(const struct dentry *dentry,
91029+ const struct vfsmount *mnt);
91030+void gr_handle_proc_create(const struct dentry *dentry,
91031+ const struct inode *inode);
91032+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
91033+ const struct dentry *parent_dentry,
91034+ const struct vfsmount *parent_mnt,
91035+ const int mode);
91036+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
91037+ const struct dentry *parent_dentry,
91038+ const struct vfsmount *parent_mnt);
91039+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
91040+ const struct vfsmount *mnt);
91041+void gr_handle_delete(const u64 ino, const dev_t dev);
91042+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
91043+ const struct vfsmount *mnt);
91044+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
91045+ const struct dentry *parent_dentry,
91046+ const struct vfsmount *parent_mnt,
91047+ const struct filename *from);
91048+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
91049+ const struct dentry *parent_dentry,
91050+ const struct vfsmount *parent_mnt,
91051+ const struct dentry *old_dentry,
91052+ const struct vfsmount *old_mnt, const struct filename *to);
91053+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
91054+int gr_acl_handle_rename(struct dentry *new_dentry,
91055+ struct dentry *parent_dentry,
91056+ const struct vfsmount *parent_mnt,
91057+ struct dentry *old_dentry,
91058+ struct inode *old_parent_inode,
91059+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
91060+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
91061+ struct dentry *old_dentry,
91062+ struct dentry *new_dentry,
91063+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
91064+__u32 gr_check_link(const struct dentry *new_dentry,
91065+ const struct dentry *parent_dentry,
91066+ const struct vfsmount *parent_mnt,
91067+ const struct dentry *old_dentry,
91068+ const struct vfsmount *old_mnt);
91069+int gr_acl_handle_filldir(const struct file *file, const char *name,
91070+ const unsigned int namelen, const u64 ino);
91071+
91072+__u32 gr_acl_handle_unix(const struct dentry *dentry,
91073+ const struct vfsmount *mnt);
91074+void gr_acl_handle_exit(void);
91075+void gr_acl_handle_psacct(struct task_struct *task, const long code);
91076+int gr_acl_handle_procpidmem(const struct task_struct *task);
91077+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
91078+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
91079+void gr_audit_ptrace(struct task_struct *task);
91080+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
91081+u64 gr_get_ino_from_dentry(struct dentry *dentry);
91082+void gr_put_exec_file(struct task_struct *task);
91083+
91084+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
91085+
91086+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
91087+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
91088+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
91089+ struct dentry *newdentry, struct vfsmount *newmnt);
91090+
91091+#ifdef CONFIG_GRKERNSEC_RESLOG
91092+extern void gr_log_resource(const struct task_struct *task, const int res,
91093+ const unsigned long wanted, const int gt);
91094+#else
91095+static inline void gr_log_resource(const struct task_struct *task, const int res,
91096+ const unsigned long wanted, const int gt)
91097+{
91098+}
91099+#endif
91100+
91101+#ifdef CONFIG_GRKERNSEC
91102+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
91103+void gr_handle_vm86(void);
91104+void gr_handle_mem_readwrite(u64 from, u64 to);
91105+
91106+void gr_log_badprocpid(const char *entry);
91107+
91108+extern int grsec_enable_dmesg;
91109+extern int grsec_disable_privio;
91110+
91111+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
91112+extern kgid_t grsec_proc_gid;
91113+#endif
91114+
91115+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
91116+extern int grsec_enable_chroot_findtask;
91117+#endif
91118+#ifdef CONFIG_GRKERNSEC_SETXID
91119+extern int grsec_enable_setxid;
91120+#endif
91121+#endif
91122+
91123+#endif
91124diff --git a/include/linux/grsock.h b/include/linux/grsock.h
91125new file mode 100644
91126index 0000000..e7ffaaf
91127--- /dev/null
91128+++ b/include/linux/grsock.h
91129@@ -0,0 +1,19 @@
91130+#ifndef __GRSOCK_H
91131+#define __GRSOCK_H
91132+
91133+extern void gr_attach_curr_ip(const struct sock *sk);
91134+extern int gr_handle_sock_all(const int family, const int type,
91135+ const int protocol);
91136+extern int gr_handle_sock_server(const struct sockaddr *sck);
91137+extern int gr_handle_sock_server_other(const struct sock *sck);
91138+extern int gr_handle_sock_client(const struct sockaddr *sck);
91139+extern int gr_search_connect(struct socket * sock,
91140+ struct sockaddr_in * addr);
91141+extern int gr_search_bind(struct socket * sock,
91142+ struct sockaddr_in * addr);
91143+extern int gr_search_listen(struct socket * sock);
91144+extern int gr_search_accept(struct socket * sock);
91145+extern int gr_search_socket(const int domain, const int type,
91146+ const int protocol);
91147+
91148+#endif
91149diff --git a/include/linux/highmem.h b/include/linux/highmem.h
91150index 9286a46..373f27f 100644
91151--- a/include/linux/highmem.h
91152+++ b/include/linux/highmem.h
91153@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
91154 kunmap_atomic(kaddr);
91155 }
91156
91157+static inline void sanitize_highpage(struct page *page)
91158+{
91159+ void *kaddr;
91160+ unsigned long flags;
91161+
91162+ local_irq_save(flags);
91163+ kaddr = kmap_atomic(page);
91164+ clear_page(kaddr);
91165+ kunmap_atomic(kaddr);
91166+ local_irq_restore(flags);
91167+}
91168+
91169 static inline void zero_user_segments(struct page *page,
91170 unsigned start1, unsigned end1,
91171 unsigned start2, unsigned end2)
91172diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
91173index 1c7b89a..7dda400 100644
91174--- a/include/linux/hwmon-sysfs.h
91175+++ b/include/linux/hwmon-sysfs.h
91176@@ -25,7 +25,8 @@
91177 struct sensor_device_attribute{
91178 struct device_attribute dev_attr;
91179 int index;
91180-};
91181+} __do_const;
91182+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
91183 #define to_sensor_dev_attr(_dev_attr) \
91184 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
91185
91186@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
91187 struct device_attribute dev_attr;
91188 u8 index;
91189 u8 nr;
91190-};
91191+} __do_const;
91192+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
91193 #define to_sensor_dev_attr_2(_dev_attr) \
91194 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
91195
91196diff --git a/include/linux/i2c.h b/include/linux/i2c.h
91197index f17da50..2f8b203 100644
91198--- a/include/linux/i2c.h
91199+++ b/include/linux/i2c.h
91200@@ -409,6 +409,7 @@ struct i2c_algorithm {
91201 int (*unreg_slave)(struct i2c_client *client);
91202 #endif
91203 };
91204+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
91205
91206 /**
91207 * struct i2c_bus_recovery_info - I2C bus recovery information
91208diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
91209index aff7ad8..3942bbd 100644
91210--- a/include/linux/if_pppox.h
91211+++ b/include/linux/if_pppox.h
91212@@ -76,7 +76,7 @@ struct pppox_proto {
91213 int (*ioctl)(struct socket *sock, unsigned int cmd,
91214 unsigned long arg);
91215 struct module *owner;
91216-};
91217+} __do_const;
91218
91219 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
91220 extern void unregister_pppox_proto(int proto_num);
91221diff --git a/include/linux/init.h b/include/linux/init.h
91222index 2df8e8d..3e1280d 100644
91223--- a/include/linux/init.h
91224+++ b/include/linux/init.h
91225@@ -37,9 +37,17 @@
91226 * section.
91227 */
91228
91229+#define add_init_latent_entropy __latent_entropy
91230+
91231+#ifdef CONFIG_MEMORY_HOTPLUG
91232+#define add_meminit_latent_entropy
91233+#else
91234+#define add_meminit_latent_entropy __latent_entropy
91235+#endif
91236+
91237 /* These are for everybody (although not all archs will actually
91238 discard it in modules) */
91239-#define __init __section(.init.text) __cold notrace
91240+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
91241 #define __initdata __section(.init.data)
91242 #define __initconst __constsection(.init.rodata)
91243 #define __exitdata __section(.exit.data)
91244@@ -100,7 +108,7 @@
91245 #define __cpuexitconst
91246
91247 /* Used for MEMORY_HOTPLUG */
91248-#define __meminit __section(.meminit.text) __cold notrace
91249+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
91250 #define __meminitdata __section(.meminit.data)
91251 #define __meminitconst __constsection(.meminit.rodata)
91252 #define __memexit __section(.memexit.text) __exitused __cold notrace
91253diff --git a/include/linux/init_task.h b/include/linux/init_task.h
91254index 696d223..6d6b39f 100644
91255--- a/include/linux/init_task.h
91256+++ b/include/linux/init_task.h
91257@@ -158,6 +158,12 @@ extern struct task_group root_task_group;
91258
91259 #define INIT_TASK_COMM "swapper"
91260
91261+#ifdef CONFIG_X86
91262+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
91263+#else
91264+#define INIT_TASK_THREAD_INFO
91265+#endif
91266+
91267 #ifdef CONFIG_RT_MUTEXES
91268 # define INIT_RT_MUTEXES(tsk) \
91269 .pi_waiters = RB_ROOT, \
91270@@ -224,6 +230,7 @@ extern struct task_group root_task_group;
91271 RCU_POINTER_INITIALIZER(cred, &init_cred), \
91272 .comm = INIT_TASK_COMM, \
91273 .thread = INIT_THREAD, \
91274+ INIT_TASK_THREAD_INFO \
91275 .fs = &init_fs, \
91276 .files = &init_files, \
91277 .signal = &init_signals, \
91278diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
91279index 2e88580..f6a99a0 100644
91280--- a/include/linux/interrupt.h
91281+++ b/include/linux/interrupt.h
91282@@ -420,8 +420,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
91283
91284 struct softirq_action
91285 {
91286- void (*action)(struct softirq_action *);
91287-};
91288+ void (*action)(void);
91289+} __no_const;
91290
91291 asmlinkage void do_softirq(void);
91292 asmlinkage void __do_softirq(void);
91293@@ -435,7 +435,7 @@ static inline void do_softirq_own_stack(void)
91294 }
91295 #endif
91296
91297-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
91298+extern void open_softirq(int nr, void (*action)(void));
91299 extern void softirq_init(void);
91300 extern void __raise_softirq_irqoff(unsigned int nr);
91301
91302diff --git a/include/linux/iommu.h b/include/linux/iommu.h
91303index 38daa45..4de4317 100644
91304--- a/include/linux/iommu.h
91305+++ b/include/linux/iommu.h
91306@@ -147,7 +147,7 @@ struct iommu_ops {
91307
91308 unsigned long pgsize_bitmap;
91309 void *priv;
91310-};
91311+} __do_const;
91312
91313 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
91314 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
91315diff --git a/include/linux/ioport.h b/include/linux/ioport.h
91316index 2c525022..345b106 100644
91317--- a/include/linux/ioport.h
91318+++ b/include/linux/ioport.h
91319@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
91320 int adjust_resource(struct resource *res, resource_size_t start,
91321 resource_size_t size);
91322 resource_size_t resource_alignment(struct resource *res);
91323-static inline resource_size_t resource_size(const struct resource *res)
91324+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
91325 {
91326 return res->end - res->start + 1;
91327 }
91328diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
91329index 1eee6bc..9cf4912 100644
91330--- a/include/linux/ipc_namespace.h
91331+++ b/include/linux/ipc_namespace.h
91332@@ -60,7 +60,7 @@ struct ipc_namespace {
91333 struct user_namespace *user_ns;
91334
91335 struct ns_common ns;
91336-};
91337+} __randomize_layout;
91338
91339 extern struct ipc_namespace init_ipc_ns;
91340 extern atomic_t nr_ipc_ns;
91341diff --git a/include/linux/irq.h b/include/linux/irq.h
91342index d09ec7a..f373eb5 100644
91343--- a/include/linux/irq.h
91344+++ b/include/linux/irq.h
91345@@ -364,7 +364,8 @@ struct irq_chip {
91346 void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
91347
91348 unsigned long flags;
91349-};
91350+} __do_const;
91351+typedef struct irq_chip __no_const irq_chip_no_const;
91352
91353 /*
91354 * irq_chip specific flags
91355diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
91356index 71d706d..817cdec 100644
91357--- a/include/linux/irqchip/arm-gic.h
91358+++ b/include/linux/irqchip/arm-gic.h
91359@@ -95,7 +95,7 @@
91360
91361 struct device_node;
91362
91363-extern struct irq_chip gic_arch_extn;
91364+extern irq_chip_no_const gic_arch_extn;
91365
91366 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
91367 u32 offset, struct device_node *);
91368diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
91369index dd1109f..4f4fdda 100644
91370--- a/include/linux/irqdesc.h
91371+++ b/include/linux/irqdesc.h
91372@@ -61,7 +61,7 @@ struct irq_desc {
91373 unsigned int irq_count; /* For detecting broken IRQs */
91374 unsigned long last_unhandled; /* Aging timer for unhandled count */
91375 unsigned int irqs_unhandled;
91376- atomic_t threads_handled;
91377+ atomic_unchecked_t threads_handled;
91378 int threads_handled_last;
91379 raw_spinlock_t lock;
91380 struct cpumask *percpu_enabled;
91381diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
91382index 676d730..8d3a1ad 100644
91383--- a/include/linux/irqdomain.h
91384+++ b/include/linux/irqdomain.h
91385@@ -40,6 +40,7 @@ struct device_node;
91386 struct irq_domain;
91387 struct of_device_id;
91388 struct irq_chip;
91389+typedef struct irq_chip __no_const irq_chip_no_const;
91390 struct irq_data;
91391
91392 /* Number of irqs reserved for a legacy isa controller */
91393diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
91394index c367cbd..c9b79e6 100644
91395--- a/include/linux/jiffies.h
91396+++ b/include/linux/jiffies.h
91397@@ -280,20 +280,20 @@ extern unsigned long preset_lpj;
91398 /*
91399 * Convert various time units to each other:
91400 */
91401-extern unsigned int jiffies_to_msecs(const unsigned long j);
91402-extern unsigned int jiffies_to_usecs(const unsigned long j);
91403+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
91404+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
91405
91406-static inline u64 jiffies_to_nsecs(const unsigned long j)
91407+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
91408 {
91409 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
91410 }
91411
91412-extern unsigned long msecs_to_jiffies(const unsigned int m);
91413-extern unsigned long usecs_to_jiffies(const unsigned int u);
91414+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
91415+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
91416 extern unsigned long timespec_to_jiffies(const struct timespec *value);
91417 extern void jiffies_to_timespec(const unsigned long jiffies,
91418- struct timespec *value);
91419-extern unsigned long timeval_to_jiffies(const struct timeval *value);
91420+ struct timespec *value) __intentional_overflow(-1);
91421+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
91422 extern void jiffies_to_timeval(const unsigned long jiffies,
91423 struct timeval *value);
91424
91425diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
91426index 6883e19..e854fcb 100644
91427--- a/include/linux/kallsyms.h
91428+++ b/include/linux/kallsyms.h
91429@@ -15,7 +15,8 @@
91430
91431 struct module;
91432
91433-#ifdef CONFIG_KALLSYMS
91434+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
91435+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
91436 /* Lookup the address for a symbol. Returns 0 if not found. */
91437 unsigned long kallsyms_lookup_name(const char *name);
91438
91439@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
91440 /* Stupid that this does nothing, but I didn't create this mess. */
91441 #define __print_symbol(fmt, addr)
91442 #endif /*CONFIG_KALLSYMS*/
91443+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
91444+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
91445+extern unsigned long kallsyms_lookup_name(const char *name);
91446+extern void __print_symbol(const char *fmt, unsigned long address);
91447+extern int sprint_backtrace(char *buffer, unsigned long address);
91448+extern int sprint_symbol(char *buffer, unsigned long address);
91449+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
91450+const char *kallsyms_lookup(unsigned long addr,
91451+ unsigned long *symbolsize,
91452+ unsigned long *offset,
91453+ char **modname, char *namebuf);
91454+extern int kallsyms_lookup_size_offset(unsigned long addr,
91455+ unsigned long *symbolsize,
91456+ unsigned long *offset);
91457+#endif
91458
91459 /* This macro allows us to keep printk typechecking */
91460 static __printf(1, 2)
91461diff --git a/include/linux/kernel.h b/include/linux/kernel.h
91462index d6d630d..feea1f5 100644
91463--- a/include/linux/kernel.h
91464+++ b/include/linux/kernel.h
91465@@ -378,7 +378,7 @@ static inline int __must_check kstrtos32_from_user(const char __user *s, size_t
91466 /* Obsolete, do not use. Use kstrto<foo> instead */
91467
91468 extern unsigned long simple_strtoul(const char *,char **,unsigned int);
91469-extern long simple_strtol(const char *,char **,unsigned int);
91470+extern long simple_strtol(const char *,char **,unsigned int) __intentional_overflow(-1);
91471 extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
91472 extern long long simple_strtoll(const char *,char **,unsigned int);
91473
91474diff --git a/include/linux/key-type.h b/include/linux/key-type.h
91475index ff9f1d3..6712be5 100644
91476--- a/include/linux/key-type.h
91477+++ b/include/linux/key-type.h
91478@@ -152,7 +152,7 @@ struct key_type {
91479 /* internal fields */
91480 struct list_head link; /* link in types list */
91481 struct lock_class_key lock_class; /* key->sem lock class */
91482-};
91483+} __do_const;
91484
91485 extern struct key_type key_type_keyring;
91486
91487diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
91488index e465bb1..19f605fd 100644
91489--- a/include/linux/kgdb.h
91490+++ b/include/linux/kgdb.h
91491@@ -52,7 +52,7 @@ extern int kgdb_connected;
91492 extern int kgdb_io_module_registered;
91493
91494 extern atomic_t kgdb_setting_breakpoint;
91495-extern atomic_t kgdb_cpu_doing_single_step;
91496+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
91497
91498 extern struct task_struct *kgdb_usethread;
91499 extern struct task_struct *kgdb_contthread;
91500@@ -254,7 +254,7 @@ struct kgdb_arch {
91501 void (*correct_hw_break)(void);
91502
91503 void (*enable_nmi)(bool on);
91504-};
91505+} __do_const;
91506
91507 /**
91508 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
91509@@ -279,7 +279,7 @@ struct kgdb_io {
91510 void (*pre_exception) (void);
91511 void (*post_exception) (void);
91512 int is_console;
91513-};
91514+} __do_const;
91515
91516 extern struct kgdb_arch arch_kgdb_ops;
91517
91518diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
91519index e705467..a92471d 100644
91520--- a/include/linux/kmemleak.h
91521+++ b/include/linux/kmemleak.h
91522@@ -27,7 +27,7 @@
91523
91524 extern void kmemleak_init(void) __ref;
91525 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
91526- gfp_t gfp) __ref;
91527+ gfp_t gfp) __ref __size_overflow(2);
91528 extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
91529 extern void kmemleak_free(const void *ptr) __ref;
91530 extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
91531@@ -62,7 +62,7 @@ static inline void kmemleak_erase(void **ptr)
91532 static inline void kmemleak_init(void)
91533 {
91534 }
91535-static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
91536+static inline void __size_overflow(2) kmemleak_alloc(const void *ptr, size_t size, int min_count,
91537 gfp_t gfp)
91538 {
91539 }
91540diff --git a/include/linux/kmod.h b/include/linux/kmod.h
91541index 0555cc6..40116ce 100644
91542--- a/include/linux/kmod.h
91543+++ b/include/linux/kmod.h
91544@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
91545 * usually useless though. */
91546 extern __printf(2, 3)
91547 int __request_module(bool wait, const char *name, ...);
91548+extern __printf(3, 4)
91549+int ___request_module(bool wait, char *param_name, const char *name, ...);
91550 #define request_module(mod...) __request_module(true, mod)
91551 #define request_module_nowait(mod...) __request_module(false, mod)
91552 #define try_then_request_module(x, mod...) \
91553@@ -57,6 +59,9 @@ struct subprocess_info {
91554 struct work_struct work;
91555 struct completion *complete;
91556 char *path;
91557+#ifdef CONFIG_GRKERNSEC
91558+ char *origpath;
91559+#endif
91560 char **argv;
91561 char **envp;
91562 int wait;
91563diff --git a/include/linux/kobject.h b/include/linux/kobject.h
91564index 2d61b90..a1d0a13 100644
91565--- a/include/linux/kobject.h
91566+++ b/include/linux/kobject.h
91567@@ -118,7 +118,7 @@ struct kobj_type {
91568 struct attribute **default_attrs;
91569 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
91570 const void *(*namespace)(struct kobject *kobj);
91571-};
91572+} __do_const;
91573
91574 struct kobj_uevent_env {
91575 char *argv[3];
91576@@ -142,6 +142,7 @@ struct kobj_attribute {
91577 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
91578 const char *buf, size_t count);
91579 };
91580+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
91581
91582 extern const struct sysfs_ops kobj_sysfs_ops;
91583
91584@@ -169,7 +170,7 @@ struct kset {
91585 spinlock_t list_lock;
91586 struct kobject kobj;
91587 const struct kset_uevent_ops *uevent_ops;
91588-};
91589+} __randomize_layout;
91590
91591 extern void kset_init(struct kset *kset);
91592 extern int __must_check kset_register(struct kset *kset);
91593diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
91594index df32d25..fb52e27 100644
91595--- a/include/linux/kobject_ns.h
91596+++ b/include/linux/kobject_ns.h
91597@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
91598 const void *(*netlink_ns)(struct sock *sk);
91599 const void *(*initial_ns)(void);
91600 void (*drop_ns)(void *);
91601-};
91602+} __do_const;
91603
91604 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
91605 int kobj_ns_type_registered(enum kobj_ns_type type);
91606diff --git a/include/linux/kref.h b/include/linux/kref.h
91607index 484604d..0f6c5b6 100644
91608--- a/include/linux/kref.h
91609+++ b/include/linux/kref.h
91610@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
91611 static inline int kref_sub(struct kref *kref, unsigned int count,
91612 void (*release)(struct kref *kref))
91613 {
91614- WARN_ON(release == NULL);
91615+ BUG_ON(release == NULL);
91616
91617 if (atomic_sub_and_test((int) count, &kref->refcount)) {
91618 release(kref);
91619diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
91620index d12b210..d91fd76 100644
91621--- a/include/linux/kvm_host.h
91622+++ b/include/linux/kvm_host.h
91623@@ -455,7 +455,7 @@ static inline void kvm_irqfd_exit(void)
91624 {
91625 }
91626 #endif
91627-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
91628+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
91629 struct module *module);
91630 void kvm_exit(void);
91631
91632@@ -633,7 +633,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
91633 struct kvm_guest_debug *dbg);
91634 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
91635
91636-int kvm_arch_init(void *opaque);
91637+int kvm_arch_init(const void *opaque);
91638 void kvm_arch_exit(void);
91639
91640 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
91641diff --git a/include/linux/libata.h b/include/linux/libata.h
91642index f8994b4..c1dec7a 100644
91643--- a/include/linux/libata.h
91644+++ b/include/linux/libata.h
91645@@ -989,7 +989,7 @@ struct ata_port_operations {
91646 * fields must be pointers.
91647 */
91648 const struct ata_port_operations *inherits;
91649-};
91650+} __do_const;
91651
91652 struct ata_port_info {
91653 unsigned long flags;
91654diff --git a/include/linux/linkage.h b/include/linux/linkage.h
91655index a6a42dd..6c5ebce 100644
91656--- a/include/linux/linkage.h
91657+++ b/include/linux/linkage.h
91658@@ -36,6 +36,7 @@
91659 #endif
91660
91661 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
91662+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
91663 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
91664
91665 /*
91666diff --git a/include/linux/list.h b/include/linux/list.h
91667index feb773c..98f3075 100644
91668--- a/include/linux/list.h
91669+++ b/include/linux/list.h
91670@@ -113,6 +113,19 @@ extern void __list_del_entry(struct list_head *entry);
91671 extern void list_del(struct list_head *entry);
91672 #endif
91673
91674+extern void __pax_list_add(struct list_head *new,
91675+ struct list_head *prev,
91676+ struct list_head *next);
91677+static inline void pax_list_add(struct list_head *new, struct list_head *head)
91678+{
91679+ __pax_list_add(new, head, head->next);
91680+}
91681+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
91682+{
91683+ __pax_list_add(new, head->prev, head);
91684+}
91685+extern void pax_list_del(struct list_head *entry);
91686+
91687 /**
91688 * list_replace - replace old entry by new one
91689 * @old : the element to be replaced
91690@@ -146,6 +159,8 @@ static inline void list_del_init(struct list_head *entry)
91691 INIT_LIST_HEAD(entry);
91692 }
91693
91694+extern void pax_list_del_init(struct list_head *entry);
91695+
91696 /**
91697 * list_move - delete from one list and add as another's head
91698 * @list: the entry to move
91699diff --git a/include/linux/lockref.h b/include/linux/lockref.h
91700index b10b122..d37b3de 100644
91701--- a/include/linux/lockref.h
91702+++ b/include/linux/lockref.h
91703@@ -28,7 +28,7 @@ struct lockref {
91704 #endif
91705 struct {
91706 spinlock_t lock;
91707- int count;
91708+ atomic_t count;
91709 };
91710 };
91711 };
91712@@ -43,9 +43,29 @@ extern void lockref_mark_dead(struct lockref *);
91713 extern int lockref_get_not_dead(struct lockref *);
91714
91715 /* Must be called under spinlock for reliable results */
91716-static inline int __lockref_is_dead(const struct lockref *l)
91717+static inline int __lockref_is_dead(const struct lockref *lockref)
91718 {
91719- return ((int)l->count < 0);
91720+ return atomic_read(&lockref->count) < 0;
91721+}
91722+
91723+static inline int __lockref_read(const struct lockref *lockref)
91724+{
91725+ return atomic_read(&lockref->count);
91726+}
91727+
91728+static inline void __lockref_set(struct lockref *lockref, int count)
91729+{
91730+ atomic_set(&lockref->count, count);
91731+}
91732+
91733+static inline void __lockref_inc(struct lockref *lockref)
91734+{
91735+ atomic_inc(&lockref->count);
91736+}
91737+
91738+static inline void __lockref_dec(struct lockref *lockref)
91739+{
91740+ atomic_dec(&lockref->count);
91741 }
91742
91743 #endif /* __LINUX_LOCKREF_H */
91744diff --git a/include/linux/math64.h b/include/linux/math64.h
91745index c45c089..298841c 100644
91746--- a/include/linux/math64.h
91747+++ b/include/linux/math64.h
91748@@ -15,7 +15,7 @@
91749 * This is commonly provided by 32bit archs to provide an optimized 64bit
91750 * divide.
91751 */
91752-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
91753+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
91754 {
91755 *remainder = dividend % divisor;
91756 return dividend / divisor;
91757@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
91758 /**
91759 * div64_u64 - unsigned 64bit divide with 64bit divisor
91760 */
91761-static inline u64 div64_u64(u64 dividend, u64 divisor)
91762+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
91763 {
91764 return dividend / divisor;
91765 }
91766@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
91767 #define div64_ul(x, y) div_u64((x), (y))
91768
91769 #ifndef div_u64_rem
91770-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
91771+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
91772 {
91773 *remainder = do_div(dividend, divisor);
91774 return dividend;
91775@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
91776 #endif
91777
91778 #ifndef div64_u64
91779-extern u64 div64_u64(u64 dividend, u64 divisor);
91780+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
91781 #endif
91782
91783 #ifndef div64_s64
91784@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
91785 * divide.
91786 */
91787 #ifndef div_u64
91788-static inline u64 div_u64(u64 dividend, u32 divisor)
91789+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
91790 {
91791 u32 remainder;
91792 return div_u64_rem(dividend, divisor, &remainder);
91793diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
91794index 3d385c8..deacb6a 100644
91795--- a/include/linux/mempolicy.h
91796+++ b/include/linux/mempolicy.h
91797@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
91798 }
91799
91800 #define vma_policy(vma) ((vma)->vm_policy)
91801+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
91802+{
91803+ vma->vm_policy = pol;
91804+}
91805
91806 static inline void mpol_get(struct mempolicy *pol)
91807 {
91808@@ -229,6 +233,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
91809 }
91810
91811 #define vma_policy(vma) NULL
91812+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
91813+{
91814+}
91815
91816 static inline int
91817 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
91818diff --git a/include/linux/mm.h b/include/linux/mm.h
91819index 47a9392..ef645bc 100644
91820--- a/include/linux/mm.h
91821+++ b/include/linux/mm.h
91822@@ -135,6 +135,11 @@ extern unsigned int kobjsize(const void *objp);
91823
91824 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
91825 #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
91826+
91827+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
91828+#define VM_PAGEEXEC 0x00080000 /* vma->vm_page_prot needs special handling */
91829+#endif
91830+
91831 #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
91832 #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
91833 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
91834@@ -254,8 +259,8 @@ struct vm_operations_struct {
91835 /* called by access_process_vm when get_user_pages() fails, typically
91836 * for use by special VMAs that can switch between memory and hardware
91837 */
91838- int (*access)(struct vm_area_struct *vma, unsigned long addr,
91839- void *buf, int len, int write);
91840+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
91841+ void *buf, size_t len, int write);
91842
91843 /* Called by the /proc/PID/maps code to ask the vma whether it
91844 * has a special name. Returning non-NULL will also cause this
91845@@ -293,6 +298,7 @@ struct vm_operations_struct {
91846 struct page *(*find_special_page)(struct vm_area_struct *vma,
91847 unsigned long addr);
91848 };
91849+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
91850
91851 struct mmu_gather;
91852 struct inode;
91853@@ -1213,8 +1219,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
91854 unsigned long *pfn);
91855 int follow_phys(struct vm_area_struct *vma, unsigned long address,
91856 unsigned int flags, unsigned long *prot, resource_size_t *phys);
91857-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
91858- void *buf, int len, int write);
91859+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
91860+ void *buf, size_t len, int write);
91861
91862 static inline void unmap_shared_mapping_range(struct address_space *mapping,
91863 loff_t const holebegin, loff_t const holelen)
91864@@ -1254,9 +1260,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
91865 }
91866 #endif
91867
91868-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
91869-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
91870- void *buf, int len, int write);
91871+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
91872+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
91873+ void *buf, size_t len, int write);
91874
91875 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
91876 unsigned long start, unsigned long nr_pages,
91877@@ -1299,34 +1305,6 @@ int set_page_dirty_lock(struct page *page);
91878 int clear_page_dirty_for_io(struct page *page);
91879 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
91880
91881-/* Is the vma a continuation of the stack vma above it? */
91882-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
91883-{
91884- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
91885-}
91886-
91887-static inline int stack_guard_page_start(struct vm_area_struct *vma,
91888- unsigned long addr)
91889-{
91890- return (vma->vm_flags & VM_GROWSDOWN) &&
91891- (vma->vm_start == addr) &&
91892- !vma_growsdown(vma->vm_prev, addr);
91893-}
91894-
91895-/* Is the vma a continuation of the stack vma below it? */
91896-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
91897-{
91898- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
91899-}
91900-
91901-static inline int stack_guard_page_end(struct vm_area_struct *vma,
91902- unsigned long addr)
91903-{
91904- return (vma->vm_flags & VM_GROWSUP) &&
91905- (vma->vm_end == addr) &&
91906- !vma_growsup(vma->vm_next, addr);
91907-}
91908-
91909 extern struct task_struct *task_of_stack(struct task_struct *task,
91910 struct vm_area_struct *vma, bool in_group);
91911
91912@@ -1449,8 +1427,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
91913 {
91914 return 0;
91915 }
91916+
91917+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
91918+ unsigned long address)
91919+{
91920+ return 0;
91921+}
91922 #else
91923 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
91924+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
91925 #endif
91926
91927 #if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
91928@@ -1460,6 +1445,12 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
91929 return 0;
91930 }
91931
91932+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
91933+ unsigned long address)
91934+{
91935+ return 0;
91936+}
91937+
91938 static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
91939
91940 static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
91941@@ -1472,6 +1463,7 @@ static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
91942
91943 #else
91944 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
91945+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
91946
91947 static inline void mm_nr_pmds_init(struct mm_struct *mm)
91948 {
91949@@ -1509,11 +1501,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
91950 NULL: pud_offset(pgd, address);
91951 }
91952
91953+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
91954+{
91955+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
91956+ NULL: pud_offset(pgd, address);
91957+}
91958+
91959 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
91960 {
91961 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
91962 NULL: pmd_offset(pud, address);
91963 }
91964+
91965+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
91966+{
91967+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
91968+ NULL: pmd_offset(pud, address);
91969+}
91970 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
91971
91972 #if USE_SPLIT_PTE_PTLOCKS
91973@@ -1890,12 +1894,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
91974 bool *need_rmap_locks);
91975 extern void exit_mmap(struct mm_struct *);
91976
91977+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
91978+extern void gr_learn_resource(const struct task_struct *task, const int res,
91979+ const unsigned long wanted, const int gt);
91980+#else
91981+static inline void gr_learn_resource(const struct task_struct *task, const int res,
91982+ const unsigned long wanted, const int gt)
91983+{
91984+}
91985+#endif
91986+
91987 static inline int check_data_rlimit(unsigned long rlim,
91988 unsigned long new,
91989 unsigned long start,
91990 unsigned long end_data,
91991 unsigned long start_data)
91992 {
91993+ gr_learn_resource(current, RLIMIT_DATA, (new - start) + (end_data - start_data), 1);
91994 if (rlim < RLIM_INFINITY) {
91995 if (((new - start) + (end_data - start_data)) > rlim)
91996 return -ENOSPC;
91997@@ -1920,7 +1935,7 @@ extern int install_special_mapping(struct mm_struct *mm,
91998 unsigned long addr, unsigned long len,
91999 unsigned long flags, struct page **pages);
92000
92001-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
92002+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
92003
92004 extern unsigned long mmap_region(struct file *file, unsigned long addr,
92005 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
92006@@ -1928,6 +1943,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
92007 unsigned long len, unsigned long prot, unsigned long flags,
92008 unsigned long pgoff, unsigned long *populate);
92009 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
92010+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
92011
92012 #ifdef CONFIG_MMU
92013 extern int __mm_populate(unsigned long addr, unsigned long len,
92014@@ -1956,10 +1972,11 @@ struct vm_unmapped_area_info {
92015 unsigned long high_limit;
92016 unsigned long align_mask;
92017 unsigned long align_offset;
92018+ unsigned long threadstack_offset;
92019 };
92020
92021-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
92022-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
92023+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
92024+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
92025
92026 /*
92027 * Search for an unmapped address range.
92028@@ -1971,7 +1988,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
92029 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
92030 */
92031 static inline unsigned long
92032-vm_unmapped_area(struct vm_unmapped_area_info *info)
92033+vm_unmapped_area(const struct vm_unmapped_area_info *info)
92034 {
92035 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
92036 return unmapped_area(info);
92037@@ -2033,6 +2050,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
92038 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
92039 struct vm_area_struct **pprev);
92040
92041+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
92042+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
92043+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
92044+
92045 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
92046 NULL if none. Assume start_addr < end_addr. */
92047 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
92048@@ -2062,10 +2083,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
92049 }
92050
92051 #ifdef CONFIG_MMU
92052-pgprot_t vm_get_page_prot(unsigned long vm_flags);
92053+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
92054 void vma_set_page_prot(struct vm_area_struct *vma);
92055 #else
92056-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
92057+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
92058 {
92059 return __pgprot(0);
92060 }
92061@@ -2127,6 +2148,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
92062 static inline void vm_stat_account(struct mm_struct *mm,
92063 unsigned long flags, struct file *file, long pages)
92064 {
92065+
92066+#ifdef CONFIG_PAX_RANDMMAP
92067+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
92068+#endif
92069+
92070 mm->total_vm += pages;
92071 }
92072 #endif /* CONFIG_PROC_FS */
92073@@ -2229,7 +2255,7 @@ extern int unpoison_memory(unsigned long pfn);
92074 extern int sysctl_memory_failure_early_kill;
92075 extern int sysctl_memory_failure_recovery;
92076 extern void shake_page(struct page *p, int access);
92077-extern atomic_long_t num_poisoned_pages;
92078+extern atomic_long_unchecked_t num_poisoned_pages;
92079 extern int soft_offline_page(struct page *page, int flags);
92080
92081 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
92082@@ -2280,5 +2306,11 @@ void __init setup_nr_node_ids(void);
92083 static inline void setup_nr_node_ids(void) {}
92084 #endif
92085
92086+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
92087+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
92088+#else
92089+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
92090+#endif
92091+
92092 #endif /* __KERNEL__ */
92093 #endif /* _LINUX_MM_H */
92094diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
92095index 199a03a..7328440 100644
92096--- a/include/linux/mm_types.h
92097+++ b/include/linux/mm_types.h
92098@@ -313,7 +313,9 @@ struct vm_area_struct {
92099 #ifdef CONFIG_NUMA
92100 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
92101 #endif
92102-};
92103+
92104+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
92105+} __randomize_layout;
92106
92107 struct core_thread {
92108 struct task_struct *task;
92109@@ -464,7 +466,25 @@ struct mm_struct {
92110 /* address of the bounds directory */
92111 void __user *bd_addr;
92112 #endif
92113-};
92114+
92115+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
92116+ unsigned long pax_flags;
92117+#endif
92118+
92119+#ifdef CONFIG_PAX_DLRESOLVE
92120+ unsigned long call_dl_resolve;
92121+#endif
92122+
92123+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
92124+ unsigned long call_syscall;
92125+#endif
92126+
92127+#ifdef CONFIG_PAX_ASLR
92128+ unsigned long delta_mmap; /* randomized offset */
92129+ unsigned long delta_stack; /* randomized offset */
92130+#endif
92131+
92132+} __randomize_layout;
92133
92134 static inline void mm_init_cpumask(struct mm_struct *mm)
92135 {
92136diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
92137index 160448f..7b332b7 100644
92138--- a/include/linux/mmc/core.h
92139+++ b/include/linux/mmc/core.h
92140@@ -79,7 +79,7 @@ struct mmc_command {
92141 #define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_MASK)
92142
92143 unsigned int retries; /* max number of retries */
92144- unsigned int error; /* command error */
92145+ int error; /* command error */
92146
92147 /*
92148 * Standard errno values are used for errors, but some have specific
92149diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
92150index c5d5278..f0b68c8 100644
92151--- a/include/linux/mmiotrace.h
92152+++ b/include/linux/mmiotrace.h
92153@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
92154 /* Called from ioremap.c */
92155 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
92156 void __iomem *addr);
92157-extern void mmiotrace_iounmap(volatile void __iomem *addr);
92158+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
92159
92160 /* For anyone to insert markers. Remember trailing newline. */
92161 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
92162@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
92163 {
92164 }
92165
92166-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
92167+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
92168 {
92169 }
92170
92171diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
92172index 2782df4..abe756e 100644
92173--- a/include/linux/mmzone.h
92174+++ b/include/linux/mmzone.h
92175@@ -526,7 +526,7 @@ struct zone {
92176
92177 ZONE_PADDING(_pad3_)
92178 /* Zone statistics */
92179- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
92180+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
92181 } ____cacheline_internodealigned_in_smp;
92182
92183 enum zone_flags {
92184diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
92185index e530533..c9620c7 100644
92186--- a/include/linux/mod_devicetable.h
92187+++ b/include/linux/mod_devicetable.h
92188@@ -139,7 +139,7 @@ struct usb_device_id {
92189 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
92190 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
92191
92192-#define HID_ANY_ID (~0)
92193+#define HID_ANY_ID (~0U)
92194 #define HID_BUS_ANY 0xffff
92195 #define HID_GROUP_ANY 0x0000
92196
92197@@ -470,7 +470,7 @@ struct dmi_system_id {
92198 const char *ident;
92199 struct dmi_strmatch matches[4];
92200 void *driver_data;
92201-};
92202+} __do_const;
92203 /*
92204 * struct dmi_device_id appears during expansion of
92205 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
92206diff --git a/include/linux/module.h b/include/linux/module.h
92207index b03485b..a26974f 100644
92208--- a/include/linux/module.h
92209+++ b/include/linux/module.h
92210@@ -17,9 +17,11 @@
92211 #include <linux/moduleparam.h>
92212 #include <linux/jump_label.h>
92213 #include <linux/export.h>
92214+#include <linux/fs.h>
92215
92216 #include <linux/percpu.h>
92217 #include <asm/module.h>
92218+#include <asm/pgtable.h>
92219
92220 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
92221 #define MODULE_SIG_STRING "~Module signature appended~\n"
92222@@ -42,7 +44,7 @@ struct module_kobject {
92223 struct kobject *drivers_dir;
92224 struct module_param_attrs *mp;
92225 struct completion *kobj_completion;
92226-};
92227+} __randomize_layout;
92228
92229 struct module_attribute {
92230 struct attribute attr;
92231@@ -54,12 +56,13 @@ struct module_attribute {
92232 int (*test)(struct module *);
92233 void (*free)(struct module *);
92234 };
92235+typedef struct module_attribute __no_const module_attribute_no_const;
92236
92237 struct module_version_attribute {
92238 struct module_attribute mattr;
92239 const char *module_name;
92240 const char *version;
92241-} __attribute__ ((__aligned__(sizeof(void *))));
92242+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
92243
92244 extern ssize_t __modver_version_show(struct module_attribute *,
92245 struct module_kobject *, char *);
92246@@ -221,7 +224,7 @@ struct module {
92247
92248 /* Sysfs stuff. */
92249 struct module_kobject mkobj;
92250- struct module_attribute *modinfo_attrs;
92251+ module_attribute_no_const *modinfo_attrs;
92252 const char *version;
92253 const char *srcversion;
92254 struct kobject *holders_dir;
92255@@ -270,19 +273,16 @@ struct module {
92256 int (*init)(void);
92257
92258 /* If this is non-NULL, vfree after init() returns */
92259- void *module_init;
92260+ void *module_init_rx, *module_init_rw;
92261
92262 /* Here is the actual code + data, vfree'd on unload. */
92263- void *module_core;
92264+ void *module_core_rx, *module_core_rw;
92265
92266 /* Here are the sizes of the init and core sections */
92267- unsigned int init_size, core_size;
92268+ unsigned int init_size_rw, core_size_rw;
92269
92270 /* The size of the executable code in each section. */
92271- unsigned int init_text_size, core_text_size;
92272-
92273- /* Size of RO sections of the module (text+rodata) */
92274- unsigned int init_ro_size, core_ro_size;
92275+ unsigned int init_size_rx, core_size_rx;
92276
92277 /* Arch-specific module values */
92278 struct mod_arch_specific arch;
92279@@ -338,6 +338,10 @@ struct module {
92280 #ifdef CONFIG_EVENT_TRACING
92281 struct ftrace_event_call **trace_events;
92282 unsigned int num_trace_events;
92283+ struct file_operations trace_id;
92284+ struct file_operations trace_enable;
92285+ struct file_operations trace_format;
92286+ struct file_operations trace_filter;
92287 #endif
92288 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
92289 unsigned int num_ftrace_callsites;
92290@@ -365,7 +369,7 @@ struct module {
92291 ctor_fn_t *ctors;
92292 unsigned int num_ctors;
92293 #endif
92294-};
92295+} __randomize_layout;
92296 #ifndef MODULE_ARCH_INIT
92297 #define MODULE_ARCH_INIT {}
92298 #endif
92299@@ -386,18 +390,48 @@ bool is_module_address(unsigned long addr);
92300 bool is_module_percpu_address(unsigned long addr);
92301 bool is_module_text_address(unsigned long addr);
92302
92303+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
92304+{
92305+
92306+#ifdef CONFIG_PAX_KERNEXEC
92307+ if (ktla_ktva(addr) >= (unsigned long)start &&
92308+ ktla_ktva(addr) < (unsigned long)start + size)
92309+ return 1;
92310+#endif
92311+
92312+ return ((void *)addr >= start && (void *)addr < start + size);
92313+}
92314+
92315+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
92316+{
92317+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
92318+}
92319+
92320+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
92321+{
92322+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
92323+}
92324+
92325+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
92326+{
92327+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
92328+}
92329+
92330+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
92331+{
92332+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
92333+}
92334+
92335 static inline bool within_module_core(unsigned long addr,
92336 const struct module *mod)
92337 {
92338- return (unsigned long)mod->module_core <= addr &&
92339- addr < (unsigned long)mod->module_core + mod->core_size;
92340+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
92341 }
92342
92343 static inline bool within_module_init(unsigned long addr,
92344 const struct module *mod)
92345 {
92346- return (unsigned long)mod->module_init <= addr &&
92347- addr < (unsigned long)mod->module_init + mod->init_size;
92348+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
92349 }
92350
92351 static inline bool within_module(unsigned long addr, const struct module *mod)
92352diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
92353index 4d0cb9b..3169ac7 100644
92354--- a/include/linux/moduleloader.h
92355+++ b/include/linux/moduleloader.h
92356@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
92357 sections. Returns NULL on failure. */
92358 void *module_alloc(unsigned long size);
92359
92360+#ifdef CONFIG_PAX_KERNEXEC
92361+void *module_alloc_exec(unsigned long size);
92362+#else
92363+#define module_alloc_exec(x) module_alloc(x)
92364+#endif
92365+
92366 /* Free memory returned from module_alloc. */
92367 void module_memfree(void *module_region);
92368
92369+#ifdef CONFIG_PAX_KERNEXEC
92370+void module_memfree_exec(void *module_region);
92371+#else
92372+#define module_memfree_exec(x) module_memfree((x))
92373+#endif
92374+
92375 /*
92376 * Apply the given relocation to the (simplified) ELF. Return -error
92377 * or 0.
92378@@ -45,8 +57,10 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
92379 unsigned int relsec,
92380 struct module *me)
92381 {
92382+#ifdef CONFIG_MODULES
92383 printk(KERN_ERR "module %s: REL relocation unsupported\n",
92384 module_name(me));
92385+#endif
92386 return -ENOEXEC;
92387 }
92388 #endif
92389@@ -68,8 +82,10 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
92390 unsigned int relsec,
92391 struct module *me)
92392 {
92393+#ifdef CONFIG_MODULES
92394 printk(KERN_ERR "module %s: REL relocation unsupported\n",
92395 module_name(me));
92396+#endif
92397 return -ENOEXEC;
92398 }
92399 #endif
92400diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
92401index 1c9effa..1160bdd 100644
92402--- a/include/linux/moduleparam.h
92403+++ b/include/linux/moduleparam.h
92404@@ -323,7 +323,7 @@ static inline void __kernel_param_unlock(void)
92405 * @len is usually just sizeof(string).
92406 */
92407 #define module_param_string(name, string, len, perm) \
92408- static const struct kparam_string __param_string_##name \
92409+ static const struct kparam_string __param_string_##name __used \
92410 = { len, string }; \
92411 __module_param_call(MODULE_PARAM_PREFIX, name, \
92412 &param_ops_string, \
92413@@ -467,7 +467,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
92414 */
92415 #define module_param_array_named(name, array, type, nump, perm) \
92416 param_check_##type(name, &(array)[0]); \
92417- static const struct kparam_array __param_arr_##name \
92418+ static const struct kparam_array __param_arr_##name __used \
92419 = { .max = ARRAY_SIZE(array), .num = nump, \
92420 .ops = &param_ops_##type, \
92421 .elemsize = sizeof(array[0]), .elem = array }; \
92422diff --git a/include/linux/mount.h b/include/linux/mount.h
92423index 564beee..653be6f 100644
92424--- a/include/linux/mount.h
92425+++ b/include/linux/mount.h
92426@@ -67,7 +67,7 @@ struct vfsmount {
92427 struct dentry *mnt_root; /* root of the mounted tree */
92428 struct super_block *mnt_sb; /* pointer to superblock */
92429 int mnt_flags;
92430-};
92431+} __randomize_layout;
92432
92433 struct file; /* forward dec */
92434 struct path;
92435diff --git a/include/linux/namei.h b/include/linux/namei.h
92436index c899077..b9a2010 100644
92437--- a/include/linux/namei.h
92438+++ b/include/linux/namei.h
92439@@ -71,8 +71,8 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
92440 extern void unlock_rename(struct dentry *, struct dentry *);
92441
92442 extern void nd_jump_link(struct nameidata *nd, struct path *path);
92443-extern void nd_set_link(struct nameidata *nd, char *path);
92444-extern char *nd_get_link(struct nameidata *nd);
92445+extern void nd_set_link(struct nameidata *nd, const char *path);
92446+extern const char *nd_get_link(const struct nameidata *nd);
92447
92448 static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
92449 {
92450diff --git a/include/linux/net.h b/include/linux/net.h
92451index 17d8339..81656c0 100644
92452--- a/include/linux/net.h
92453+++ b/include/linux/net.h
92454@@ -192,7 +192,7 @@ struct net_proto_family {
92455 int (*create)(struct net *net, struct socket *sock,
92456 int protocol, int kern);
92457 struct module *owner;
92458-};
92459+} __do_const;
92460
92461 struct iovec;
92462 struct kvec;
92463diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
92464index 2787388..1dd8e88 100644
92465--- a/include/linux/netdevice.h
92466+++ b/include/linux/netdevice.h
92467@@ -1198,6 +1198,7 @@ struct net_device_ops {
92468 u8 state);
92469 #endif
92470 };
92471+typedef struct net_device_ops __no_const net_device_ops_no_const;
92472
92473 /**
92474 * enum net_device_priv_flags - &struct net_device priv_flags
92475@@ -1546,10 +1547,10 @@ struct net_device {
92476
92477 struct net_device_stats stats;
92478
92479- atomic_long_t rx_dropped;
92480- atomic_long_t tx_dropped;
92481+ atomic_long_unchecked_t rx_dropped;
92482+ atomic_long_unchecked_t tx_dropped;
92483
92484- atomic_t carrier_changes;
92485+ atomic_unchecked_t carrier_changes;
92486
92487 #ifdef CONFIG_WIRELESS_EXT
92488 const struct iw_handler_def * wireless_handlers;
92489diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
92490index 2517ece..0bbfcfb 100644
92491--- a/include/linux/netfilter.h
92492+++ b/include/linux/netfilter.h
92493@@ -85,7 +85,7 @@ struct nf_sockopt_ops {
92494 #endif
92495 /* Use the module struct to lock set/get code in place */
92496 struct module *owner;
92497-};
92498+} __do_const;
92499
92500 /* Function to register/unregister hook points. */
92501 int nf_register_hook(struct nf_hook_ops *reg);
92502diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
92503index e955d47..04a5338 100644
92504--- a/include/linux/netfilter/nfnetlink.h
92505+++ b/include/linux/netfilter/nfnetlink.h
92506@@ -19,7 +19,7 @@ struct nfnl_callback {
92507 const struct nlattr * const cda[]);
92508 const struct nla_policy *policy; /* netlink attribute policy */
92509 const u_int16_t attr_count; /* number of nlattr's */
92510-};
92511+} __do_const;
92512
92513 struct nfnetlink_subsystem {
92514 const char *name;
92515diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
92516new file mode 100644
92517index 0000000..33f4af8
92518--- /dev/null
92519+++ b/include/linux/netfilter/xt_gradm.h
92520@@ -0,0 +1,9 @@
92521+#ifndef _LINUX_NETFILTER_XT_GRADM_H
92522+#define _LINUX_NETFILTER_XT_GRADM_H 1
92523+
92524+struct xt_gradm_mtinfo {
92525+ __u16 flags;
92526+ __u16 invflags;
92527+};
92528+
92529+#endif
92530diff --git a/include/linux/nls.h b/include/linux/nls.h
92531index 520681b..2b7fabb 100644
92532--- a/include/linux/nls.h
92533+++ b/include/linux/nls.h
92534@@ -31,7 +31,7 @@ struct nls_table {
92535 const unsigned char *charset2upper;
92536 struct module *owner;
92537 struct nls_table *next;
92538-};
92539+} __do_const;
92540
92541 /* this value hold the maximum octet of charset */
92542 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
92543@@ -46,7 +46,7 @@ enum utf16_endian {
92544 /* nls_base.c */
92545 extern int __register_nls(struct nls_table *, struct module *);
92546 extern int unregister_nls(struct nls_table *);
92547-extern struct nls_table *load_nls(char *);
92548+extern struct nls_table *load_nls(const char *);
92549 extern void unload_nls(struct nls_table *);
92550 extern struct nls_table *load_nls_default(void);
92551 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
92552diff --git a/include/linux/notifier.h b/include/linux/notifier.h
92553index d14a4c3..a078786 100644
92554--- a/include/linux/notifier.h
92555+++ b/include/linux/notifier.h
92556@@ -54,7 +54,8 @@ struct notifier_block {
92557 notifier_fn_t notifier_call;
92558 struct notifier_block __rcu *next;
92559 int priority;
92560-};
92561+} __do_const;
92562+typedef struct notifier_block __no_const notifier_block_no_const;
92563
92564 struct atomic_notifier_head {
92565 spinlock_t lock;
92566diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
92567index b2a0f15..4d7da32 100644
92568--- a/include/linux/oprofile.h
92569+++ b/include/linux/oprofile.h
92570@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
92571 int oprofilefs_create_ro_ulong(struct dentry * root,
92572 char const * name, ulong * val);
92573
92574-/** Create a file for read-only access to an atomic_t. */
92575+/** Create a file for read-only access to an atomic_unchecked_t. */
92576 int oprofilefs_create_ro_atomic(struct dentry * root,
92577- char const * name, atomic_t * val);
92578+ char const * name, atomic_unchecked_t * val);
92579
92580 /** create a directory */
92581 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
92582diff --git a/include/linux/padata.h b/include/linux/padata.h
92583index 4386946..f50c615 100644
92584--- a/include/linux/padata.h
92585+++ b/include/linux/padata.h
92586@@ -129,7 +129,7 @@ struct parallel_data {
92587 struct padata_serial_queue __percpu *squeue;
92588 atomic_t reorder_objects;
92589 atomic_t refcnt;
92590- atomic_t seq_nr;
92591+ atomic_unchecked_t seq_nr;
92592 struct padata_cpumask cpumask;
92593 spinlock_t lock ____cacheline_aligned;
92594 unsigned int processed;
92595diff --git a/include/linux/path.h b/include/linux/path.h
92596index d137218..be0c176 100644
92597--- a/include/linux/path.h
92598+++ b/include/linux/path.h
92599@@ -1,13 +1,15 @@
92600 #ifndef _LINUX_PATH_H
92601 #define _LINUX_PATH_H
92602
92603+#include <linux/compiler.h>
92604+
92605 struct dentry;
92606 struct vfsmount;
92607
92608 struct path {
92609 struct vfsmount *mnt;
92610 struct dentry *dentry;
92611-};
92612+} __randomize_layout;
92613
92614 extern void path_get(const struct path *);
92615 extern void path_put(const struct path *);
92616diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
92617index 8c78950..0d74ed9 100644
92618--- a/include/linux/pci_hotplug.h
92619+++ b/include/linux/pci_hotplug.h
92620@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
92621 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
92622 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
92623 int (*reset_slot) (struct hotplug_slot *slot, int probe);
92624-};
92625+} __do_const;
92626+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
92627
92628 /**
92629 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
92630diff --git a/include/linux/percpu.h b/include/linux/percpu.h
92631index caebf2a..4c3ae9d 100644
92632--- a/include/linux/percpu.h
92633+++ b/include/linux/percpu.h
92634@@ -34,7 +34,7 @@
92635 * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
92636 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
92637 */
92638-#define PERCPU_DYNAMIC_EARLY_SLOTS 128
92639+#define PERCPU_DYNAMIC_EARLY_SLOTS 256
92640 #define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
92641
92642 /*
92643diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
92644index 2b62198..2b74233 100644
92645--- a/include/linux/perf_event.h
92646+++ b/include/linux/perf_event.h
92647@@ -343,8 +343,8 @@ struct perf_event {
92648
92649 enum perf_event_active_state state;
92650 unsigned int attach_state;
92651- local64_t count;
92652- atomic64_t child_count;
92653+ local64_t count; /* PaX: fix it one day */
92654+ atomic64_unchecked_t child_count;
92655
92656 /*
92657 * These are the total time in nanoseconds that the event
92658@@ -395,8 +395,8 @@ struct perf_event {
92659 * These accumulate total time (in nanoseconds) that children
92660 * events have been enabled and running, respectively.
92661 */
92662- atomic64_t child_total_time_enabled;
92663- atomic64_t child_total_time_running;
92664+ atomic64_unchecked_t child_total_time_enabled;
92665+ atomic64_unchecked_t child_total_time_running;
92666
92667 /*
92668 * Protect attach/detach and child_list:
92669@@ -752,7 +752,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
92670 entry->ip[entry->nr++] = ip;
92671 }
92672
92673-extern int sysctl_perf_event_paranoid;
92674+extern int sysctl_perf_event_legitimately_concerned;
92675 extern int sysctl_perf_event_mlock;
92676 extern int sysctl_perf_event_sample_rate;
92677 extern int sysctl_perf_cpu_time_max_percent;
92678@@ -767,19 +767,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
92679 loff_t *ppos);
92680
92681
92682+static inline bool perf_paranoid_any(void)
92683+{
92684+ return sysctl_perf_event_legitimately_concerned > 2;
92685+}
92686+
92687 static inline bool perf_paranoid_tracepoint_raw(void)
92688 {
92689- return sysctl_perf_event_paranoid > -1;
92690+ return sysctl_perf_event_legitimately_concerned > -1;
92691 }
92692
92693 static inline bool perf_paranoid_cpu(void)
92694 {
92695- return sysctl_perf_event_paranoid > 0;
92696+ return sysctl_perf_event_legitimately_concerned > 0;
92697 }
92698
92699 static inline bool perf_paranoid_kernel(void)
92700 {
92701- return sysctl_perf_event_paranoid > 1;
92702+ return sysctl_perf_event_legitimately_concerned > 1;
92703 }
92704
92705 extern void perf_event_init(void);
92706@@ -912,7 +917,7 @@ struct perf_pmu_events_attr {
92707 struct device_attribute attr;
92708 u64 id;
92709 const char *event_str;
92710-};
92711+} __do_const;
92712
92713 ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
92714 char *page);
92715diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
92716index 918b117..7af374b7 100644
92717--- a/include/linux/pid_namespace.h
92718+++ b/include/linux/pid_namespace.h
92719@@ -45,7 +45,7 @@ struct pid_namespace {
92720 int hide_pid;
92721 int reboot; /* group exit code if this pidns was rebooted */
92722 struct ns_common ns;
92723-};
92724+} __randomize_layout;
92725
92726 extern struct pid_namespace init_pid_ns;
92727
92728diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
92729index eb8b8ac..62649e1 100644
92730--- a/include/linux/pipe_fs_i.h
92731+++ b/include/linux/pipe_fs_i.h
92732@@ -47,10 +47,10 @@ struct pipe_inode_info {
92733 struct mutex mutex;
92734 wait_queue_head_t wait;
92735 unsigned int nrbufs, curbuf, buffers;
92736- unsigned int readers;
92737- unsigned int writers;
92738- unsigned int files;
92739- unsigned int waiting_writers;
92740+ atomic_t readers;
92741+ atomic_t writers;
92742+ atomic_t files;
92743+ atomic_t waiting_writers;
92744 unsigned int r_counter;
92745 unsigned int w_counter;
92746 struct page *tmp_page;
92747diff --git a/include/linux/pm.h b/include/linux/pm.h
92748index e2f1be6..78a0506 100644
92749--- a/include/linux/pm.h
92750+++ b/include/linux/pm.h
92751@@ -608,6 +608,7 @@ struct dev_pm_domain {
92752 struct dev_pm_ops ops;
92753 void (*detach)(struct device *dev, bool power_off);
92754 };
92755+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
92756
92757 /*
92758 * The PM_EVENT_ messages are also used by drivers implementing the legacy
92759diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
92760index 080e778..cbdaef7 100644
92761--- a/include/linux/pm_domain.h
92762+++ b/include/linux/pm_domain.h
92763@@ -39,11 +39,11 @@ struct gpd_dev_ops {
92764 int (*save_state)(struct device *dev);
92765 int (*restore_state)(struct device *dev);
92766 bool (*active_wakeup)(struct device *dev);
92767-};
92768+} __no_const;
92769
92770 struct gpd_cpuidle_data {
92771 unsigned int saved_exit_latency;
92772- struct cpuidle_state *idle_state;
92773+ cpuidle_state_no_const *idle_state;
92774 };
92775
92776 struct generic_pm_domain {
92777diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
92778index 30e84d4..22278b4 100644
92779--- a/include/linux/pm_runtime.h
92780+++ b/include/linux/pm_runtime.h
92781@@ -115,7 +115,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
92782
92783 static inline void pm_runtime_mark_last_busy(struct device *dev)
92784 {
92785- ACCESS_ONCE(dev->power.last_busy) = jiffies;
92786+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
92787 }
92788
92789 static inline bool pm_runtime_is_irq_safe(struct device *dev)
92790diff --git a/include/linux/pnp.h b/include/linux/pnp.h
92791index 6512e9c..ec27fa2 100644
92792--- a/include/linux/pnp.h
92793+++ b/include/linux/pnp.h
92794@@ -298,7 +298,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
92795 struct pnp_fixup {
92796 char id[7];
92797 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
92798-};
92799+} __do_const;
92800
92801 /* config parameters */
92802 #define PNP_CONFIG_NORMAL 0x0001
92803diff --git a/include/linux/poison.h b/include/linux/poison.h
92804index 2110a81..13a11bb 100644
92805--- a/include/linux/poison.h
92806+++ b/include/linux/poison.h
92807@@ -19,8 +19,8 @@
92808 * under normal circumstances, used to verify that nobody uses
92809 * non-initialized list entries.
92810 */
92811-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
92812-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
92813+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
92814+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
92815
92816 /********** include/linux/timer.h **********/
92817 /*
92818diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
92819index d8b187c3..9a9257a 100644
92820--- a/include/linux/power/smartreflex.h
92821+++ b/include/linux/power/smartreflex.h
92822@@ -238,7 +238,7 @@ struct omap_sr_class_data {
92823 int (*notify)(struct omap_sr *sr, u32 status);
92824 u8 notify_flags;
92825 u8 class_type;
92826-};
92827+} __do_const;
92828
92829 /**
92830 * struct omap_sr_nvalue_table - Smartreflex n-target value info
92831diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
92832index 4ea1d37..80f4b33 100644
92833--- a/include/linux/ppp-comp.h
92834+++ b/include/linux/ppp-comp.h
92835@@ -84,7 +84,7 @@ struct compressor {
92836 struct module *owner;
92837 /* Extra skb space needed by the compressor algorithm */
92838 unsigned int comp_extra;
92839-};
92840+} __do_const;
92841
92842 /*
92843 * The return value from decompress routine is the length of the
92844diff --git a/include/linux/preempt.h b/include/linux/preempt.h
92845index de83b4e..c4b997d 100644
92846--- a/include/linux/preempt.h
92847+++ b/include/linux/preempt.h
92848@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
92849 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
92850 #endif
92851
92852+#define raw_preempt_count_add(val) __preempt_count_add(val)
92853+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
92854+
92855 #define __preempt_count_inc() __preempt_count_add(1)
92856 #define __preempt_count_dec() __preempt_count_sub(1)
92857
92858 #define preempt_count_inc() preempt_count_add(1)
92859+#define raw_preempt_count_inc() raw_preempt_count_add(1)
92860 #define preempt_count_dec() preempt_count_sub(1)
92861+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
92862
92863 #ifdef CONFIG_PREEMPT_COUNT
92864
92865@@ -41,6 +46,12 @@ do { \
92866 barrier(); \
92867 } while (0)
92868
92869+#define raw_preempt_disable() \
92870+do { \
92871+ raw_preempt_count_inc(); \
92872+ barrier(); \
92873+} while (0)
92874+
92875 #define sched_preempt_enable_no_resched() \
92876 do { \
92877 barrier(); \
92878@@ -49,6 +60,12 @@ do { \
92879
92880 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
92881
92882+#define raw_preempt_enable_no_resched() \
92883+do { \
92884+ barrier(); \
92885+ raw_preempt_count_dec(); \
92886+} while (0)
92887+
92888 #ifdef CONFIG_PREEMPT
92889 #define preempt_enable() \
92890 do { \
92891@@ -113,8 +130,10 @@ do { \
92892 * region.
92893 */
92894 #define preempt_disable() barrier()
92895+#define raw_preempt_disable() barrier()
92896 #define sched_preempt_enable_no_resched() barrier()
92897 #define preempt_enable_no_resched() barrier()
92898+#define raw_preempt_enable_no_resched() barrier()
92899 #define preempt_enable() barrier()
92900 #define preempt_check_resched() do { } while (0)
92901
92902@@ -128,11 +147,13 @@ do { \
92903 /*
92904 * Modules have no business playing preemption tricks.
92905 */
92906+#ifndef CONFIG_PAX_KERNEXEC
92907 #undef sched_preempt_enable_no_resched
92908 #undef preempt_enable_no_resched
92909 #undef preempt_enable_no_resched_notrace
92910 #undef preempt_check_resched
92911 #endif
92912+#endif
92913
92914 #define preempt_set_need_resched() \
92915 do { \
92916diff --git a/include/linux/printk.h b/include/linux/printk.h
92917index baa3f97..168cff1 100644
92918--- a/include/linux/printk.h
92919+++ b/include/linux/printk.h
92920@@ -121,6 +121,7 @@ void early_printk(const char *s, ...) { }
92921 #endif
92922
92923 typedef int(*printk_func_t)(const char *fmt, va_list args);
92924+extern int kptr_restrict;
92925
92926 #ifdef CONFIG_PRINTK
92927 asmlinkage __printf(5, 0)
92928@@ -156,7 +157,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
92929
92930 extern int printk_delay_msec;
92931 extern int dmesg_restrict;
92932-extern int kptr_restrict;
92933
92934 extern void wake_up_klogd(void);
92935
92936diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
92937index b97bf2e..f14c92d4 100644
92938--- a/include/linux/proc_fs.h
92939+++ b/include/linux/proc_fs.h
92940@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
92941 extern struct proc_dir_entry *proc_symlink(const char *,
92942 struct proc_dir_entry *, const char *);
92943 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
92944+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
92945 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
92946 struct proc_dir_entry *, void *);
92947+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
92948+ struct proc_dir_entry *, void *);
92949 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
92950 struct proc_dir_entry *);
92951
92952@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
92953 return proc_create_data(name, mode, parent, proc_fops, NULL);
92954 }
92955
92956+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
92957+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
92958+{
92959+#ifdef CONFIG_GRKERNSEC_PROC_USER
92960+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
92961+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92962+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
92963+#else
92964+ return proc_create_data(name, mode, parent, proc_fops, NULL);
92965+#endif
92966+}
92967+
92968+
92969 extern void proc_set_size(struct proc_dir_entry *, loff_t);
92970 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
92971 extern void *PDE_DATA(const struct inode *);
92972@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
92973 struct proc_dir_entry *parent,const char *dest) { return NULL;}
92974 static inline struct proc_dir_entry *proc_mkdir(const char *name,
92975 struct proc_dir_entry *parent) {return NULL;}
92976+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
92977+ struct proc_dir_entry *parent) { return NULL; }
92978 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
92979 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
92980+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
92981+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
92982 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
92983 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
92984 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
92985@@ -79,7 +99,7 @@ struct net;
92986 static inline struct proc_dir_entry *proc_net_mkdir(
92987 struct net *net, const char *name, struct proc_dir_entry *parent)
92988 {
92989- return proc_mkdir_data(name, 0, parent, net);
92990+ return proc_mkdir_data_restrict(name, 0, parent, net);
92991 }
92992
92993 #endif /* _LINUX_PROC_FS_H */
92994diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
92995index 42dfc61..8113a99 100644
92996--- a/include/linux/proc_ns.h
92997+++ b/include/linux/proc_ns.h
92998@@ -16,7 +16,7 @@ struct proc_ns_operations {
92999 struct ns_common *(*get)(struct task_struct *task);
93000 void (*put)(struct ns_common *ns);
93001 int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
93002-};
93003+} __do_const __randomize_layout;
93004
93005 extern const struct proc_ns_operations netns_operations;
93006 extern const struct proc_ns_operations utsns_operations;
93007diff --git a/include/linux/quota.h b/include/linux/quota.h
93008index d534e8e..782e604 100644
93009--- a/include/linux/quota.h
93010+++ b/include/linux/quota.h
93011@@ -75,7 +75,7 @@ struct kqid { /* Type in which we store the quota identifier */
93012
93013 extern bool qid_eq(struct kqid left, struct kqid right);
93014 extern bool qid_lt(struct kqid left, struct kqid right);
93015-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
93016+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
93017 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
93018 extern bool qid_valid(struct kqid qid);
93019
93020diff --git a/include/linux/random.h b/include/linux/random.h
93021index b05856e..0a9f14e 100644
93022--- a/include/linux/random.h
93023+++ b/include/linux/random.h
93024@@ -9,9 +9,19 @@
93025 #include <uapi/linux/random.h>
93026
93027 extern void add_device_randomness(const void *, unsigned int);
93028+
93029+static inline void add_latent_entropy(void)
93030+{
93031+
93032+#ifdef LATENT_ENTROPY_PLUGIN
93033+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
93034+#endif
93035+
93036+}
93037+
93038 extern void add_input_randomness(unsigned int type, unsigned int code,
93039- unsigned int value);
93040-extern void add_interrupt_randomness(int irq, int irq_flags);
93041+ unsigned int value) __latent_entropy;
93042+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
93043
93044 extern void get_random_bytes(void *buf, int nbytes);
93045 extern void get_random_bytes_arch(void *buf, int nbytes);
93046@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
93047 extern const struct file_operations random_fops, urandom_fops;
93048 #endif
93049
93050-unsigned int get_random_int(void);
93051+unsigned int __intentional_overflow(-1) get_random_int(void);
93052 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
93053
93054-u32 prandom_u32(void);
93055+u32 prandom_u32(void) __intentional_overflow(-1);
93056 void prandom_bytes(void *buf, size_t nbytes);
93057 void prandom_seed(u32 seed);
93058 void prandom_reseed_late(void);
93059@@ -37,6 +47,11 @@ struct rnd_state {
93060 u32 prandom_u32_state(struct rnd_state *state);
93061 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
93062
93063+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
93064+{
93065+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
93066+}
93067+
93068 /**
93069 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
93070 * @ep_ro: right open interval endpoint
93071@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
93072 *
93073 * Returns: pseudo-random number in interval [0, ep_ro)
93074 */
93075-static inline u32 prandom_u32_max(u32 ep_ro)
93076+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
93077 {
93078 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
93079 }
93080diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
93081index 378c5ee..aa84a47 100644
93082--- a/include/linux/rbtree_augmented.h
93083+++ b/include/linux/rbtree_augmented.h
93084@@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
93085 old->rbaugmented = rbcompute(old); \
93086 } \
93087 rbstatic const struct rb_augment_callbacks rbname = { \
93088- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
93089+ .propagate = rbname ## _propagate, \
93090+ .copy = rbname ## _copy, \
93091+ .rotate = rbname ## _rotate \
93092 };
93093
93094
93095diff --git a/include/linux/rculist.h b/include/linux/rculist.h
93096index a18b16f..2683096 100644
93097--- a/include/linux/rculist.h
93098+++ b/include/linux/rculist.h
93099@@ -29,8 +29,8 @@
93100 */
93101 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
93102 {
93103- ACCESS_ONCE(list->next) = list;
93104- ACCESS_ONCE(list->prev) = list;
93105+ ACCESS_ONCE_RW(list->next) = list;
93106+ ACCESS_ONCE_RW(list->prev) = list;
93107 }
93108
93109 /*
93110@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
93111 struct list_head *prev, struct list_head *next);
93112 #endif
93113
93114+void __pax_list_add_rcu(struct list_head *new,
93115+ struct list_head *prev, struct list_head *next);
93116+
93117 /**
93118 * list_add_rcu - add a new entry to rcu-protected list
93119 * @new: new entry to be added
93120@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
93121 __list_add_rcu(new, head, head->next);
93122 }
93123
93124+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
93125+{
93126+ __pax_list_add_rcu(new, head, head->next);
93127+}
93128+
93129 /**
93130 * list_add_tail_rcu - add a new entry to rcu-protected list
93131 * @new: new entry to be added
93132@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
93133 __list_add_rcu(new, head->prev, head);
93134 }
93135
93136+static inline void pax_list_add_tail_rcu(struct list_head *new,
93137+ struct list_head *head)
93138+{
93139+ __pax_list_add_rcu(new, head->prev, head);
93140+}
93141+
93142 /**
93143 * list_del_rcu - deletes entry from list without re-initialization
93144 * @entry: the element to delete from the list.
93145@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
93146 entry->prev = LIST_POISON2;
93147 }
93148
93149+extern void pax_list_del_rcu(struct list_head *entry);
93150+
93151 /**
93152 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
93153 * @n: the element to delete from the hash list.
93154diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
93155index 7809749..1cd9315 100644
93156--- a/include/linux/rcupdate.h
93157+++ b/include/linux/rcupdate.h
93158@@ -333,7 +333,7 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
93159 do { \
93160 rcu_all_qs(); \
93161 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
93162- ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
93163+ ACCESS_ONCE_RW((t)->rcu_tasks_holdout) = false; \
93164 } while (0)
93165 #else /* #ifdef CONFIG_TASKS_RCU */
93166 #define TASKS_RCU(x) do { } while (0)
93167diff --git a/include/linux/reboot.h b/include/linux/reboot.h
93168index 67fc8fc..a90f7d8 100644
93169--- a/include/linux/reboot.h
93170+++ b/include/linux/reboot.h
93171@@ -47,9 +47,9 @@ extern void do_kernel_restart(char *cmd);
93172 */
93173
93174 extern void migrate_to_reboot_cpu(void);
93175-extern void machine_restart(char *cmd);
93176-extern void machine_halt(void);
93177-extern void machine_power_off(void);
93178+extern void machine_restart(char *cmd) __noreturn;
93179+extern void machine_halt(void) __noreturn;
93180+extern void machine_power_off(void) __noreturn;
93181
93182 extern void machine_shutdown(void);
93183 struct pt_regs;
93184@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
93185 */
93186
93187 extern void kernel_restart_prepare(char *cmd);
93188-extern void kernel_restart(char *cmd);
93189-extern void kernel_halt(void);
93190-extern void kernel_power_off(void);
93191+extern void kernel_restart(char *cmd) __noreturn;
93192+extern void kernel_halt(void) __noreturn;
93193+extern void kernel_power_off(void) __noreturn;
93194
93195 extern int C_A_D; /* for sysctl */
93196 void ctrl_alt_del(void);
93197@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
93198 * Emergency restart, callable from an interrupt handler.
93199 */
93200
93201-extern void emergency_restart(void);
93202+extern void emergency_restart(void) __noreturn;
93203 #include <asm/emergency-restart.h>
93204
93205 #endif /* _LINUX_REBOOT_H */
93206diff --git a/include/linux/regset.h b/include/linux/regset.h
93207index 8e0c9fe..ac4d221 100644
93208--- a/include/linux/regset.h
93209+++ b/include/linux/regset.h
93210@@ -161,7 +161,8 @@ struct user_regset {
93211 unsigned int align;
93212 unsigned int bias;
93213 unsigned int core_note_type;
93214-};
93215+} __do_const;
93216+typedef struct user_regset __no_const user_regset_no_const;
93217
93218 /**
93219 * struct user_regset_view - available regsets
93220diff --git a/include/linux/relay.h b/include/linux/relay.h
93221index d7c8359..818daf5 100644
93222--- a/include/linux/relay.h
93223+++ b/include/linux/relay.h
93224@@ -157,7 +157,7 @@ struct rchan_callbacks
93225 * The callback should return 0 if successful, negative if not.
93226 */
93227 int (*remove_buf_file)(struct dentry *dentry);
93228-};
93229+} __no_const;
93230
93231 /*
93232 * CONFIG_RELAY kernel API, kernel/relay.c
93233diff --git a/include/linux/rio.h b/include/linux/rio.h
93234index 6bda06f..bf39a9b 100644
93235--- a/include/linux/rio.h
93236+++ b/include/linux/rio.h
93237@@ -358,7 +358,7 @@ struct rio_ops {
93238 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
93239 u64 rstart, u32 size, u32 flags);
93240 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
93241-};
93242+} __no_const;
93243
93244 #define RIO_RESOURCE_MEM 0x00000100
93245 #define RIO_RESOURCE_DOORBELL 0x00000200
93246diff --git a/include/linux/rmap.h b/include/linux/rmap.h
93247index c4c559a..6ba9a26 100644
93248--- a/include/linux/rmap.h
93249+++ b/include/linux/rmap.h
93250@@ -154,8 +154,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
93251 void anon_vma_init(void); /* create anon_vma_cachep */
93252 int anon_vma_prepare(struct vm_area_struct *);
93253 void unlink_anon_vmas(struct vm_area_struct *);
93254-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
93255-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
93256+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
93257+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
93258
93259 static inline void anon_vma_merge(struct vm_area_struct *vma,
93260 struct vm_area_struct *next)
93261diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
93262index ed8f9e70..2e627f2 100644
93263--- a/include/linux/scatterlist.h
93264+++ b/include/linux/scatterlist.h
93265@@ -1,6 +1,7 @@
93266 #ifndef _LINUX_SCATTERLIST_H
93267 #define _LINUX_SCATTERLIST_H
93268
93269+#include <linux/sched.h>
93270 #include <linux/string.h>
93271 #include <linux/bug.h>
93272 #include <linux/mm.h>
93273@@ -111,10 +112,17 @@ static inline struct page *sg_page(struct scatterlist *sg)
93274 static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
93275 unsigned int buflen)
93276 {
93277+ const void *realbuf = buf;
93278+
93279+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
93280+ if (object_starts_on_stack(buf))
93281+ realbuf = buf - current->stack + current->lowmem_stack;
93282+#endif
93283+
93284 #ifdef CONFIG_DEBUG_SG
93285- BUG_ON(!virt_addr_valid(buf));
93286+ BUG_ON(!virt_addr_valid(realbuf));
93287 #endif
93288- sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
93289+ sg_set_page(sg, virt_to_page(realbuf), buflen, offset_in_page(realbuf));
93290 }
93291
93292 /*
93293diff --git a/include/linux/sched.h b/include/linux/sched.h
93294index 51348f7..8c8b0ba 100644
93295--- a/include/linux/sched.h
93296+++ b/include/linux/sched.h
93297@@ -133,6 +133,7 @@ struct fs_struct;
93298 struct perf_event_context;
93299 struct blk_plug;
93300 struct filename;
93301+struct linux_binprm;
93302
93303 #define VMACACHE_BITS 2
93304 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
93305@@ -420,7 +421,7 @@ extern char __sched_text_start[], __sched_text_end[];
93306 extern int in_sched_functions(unsigned long addr);
93307
93308 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
93309-extern signed long schedule_timeout(signed long timeout);
93310+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
93311 extern signed long schedule_timeout_interruptible(signed long timeout);
93312 extern signed long schedule_timeout_killable(signed long timeout);
93313 extern signed long schedule_timeout_uninterruptible(signed long timeout);
93314@@ -438,6 +439,19 @@ struct nsproxy;
93315 struct user_namespace;
93316
93317 #ifdef CONFIG_MMU
93318+
93319+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
93320+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
93321+#else
93322+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
93323+{
93324+ return 0;
93325+}
93326+#endif
93327+
93328+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
93329+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
93330+
93331 extern void arch_pick_mmap_layout(struct mm_struct *mm);
93332 extern unsigned long
93333 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
93334@@ -736,6 +750,17 @@ struct signal_struct {
93335 #ifdef CONFIG_TASKSTATS
93336 struct taskstats *stats;
93337 #endif
93338+
93339+#ifdef CONFIG_GRKERNSEC
93340+ u32 curr_ip;
93341+ u32 saved_ip;
93342+ u32 gr_saddr;
93343+ u32 gr_daddr;
93344+ u16 gr_sport;
93345+ u16 gr_dport;
93346+ u8 used_accept:1;
93347+#endif
93348+
93349 #ifdef CONFIG_AUDIT
93350 unsigned audit_tty;
93351 unsigned audit_tty_log_passwd;
93352@@ -762,7 +787,7 @@ struct signal_struct {
93353 struct mutex cred_guard_mutex; /* guard against foreign influences on
93354 * credential calculations
93355 * (notably. ptrace) */
93356-};
93357+} __randomize_layout;
93358
93359 /*
93360 * Bits in flags field of signal_struct.
93361@@ -815,6 +840,14 @@ struct user_struct {
93362 struct key *session_keyring; /* UID's default session keyring */
93363 #endif
93364
93365+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
93366+ unsigned char kernel_banned;
93367+#endif
93368+#ifdef CONFIG_GRKERNSEC_BRUTE
93369+ unsigned char suid_banned;
93370+ unsigned long suid_ban_expires;
93371+#endif
93372+
93373 /* Hash table maintenance information */
93374 struct hlist_node uidhash_node;
93375 kuid_t uid;
93376@@ -822,7 +855,7 @@ struct user_struct {
93377 #ifdef CONFIG_PERF_EVENTS
93378 atomic_long_t locked_vm;
93379 #endif
93380-};
93381+} __randomize_layout;
93382
93383 extern int uids_sysfs_init(void);
93384
93385@@ -1286,6 +1319,9 @@ enum perf_event_task_context {
93386 struct task_struct {
93387 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
93388 void *stack;
93389+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
93390+ void *lowmem_stack;
93391+#endif
93392 atomic_t usage;
93393 unsigned int flags; /* per process flags, defined below */
93394 unsigned int ptrace;
93395@@ -1419,8 +1455,8 @@ struct task_struct {
93396 struct list_head thread_node;
93397
93398 struct completion *vfork_done; /* for vfork() */
93399- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
93400- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
93401+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
93402+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
93403
93404 cputime_t utime, stime, utimescaled, stimescaled;
93405 cputime_t gtime;
93406@@ -1445,11 +1481,6 @@ struct task_struct {
93407 struct task_cputime cputime_expires;
93408 struct list_head cpu_timers[3];
93409
93410-/* process credentials */
93411- const struct cred __rcu *real_cred; /* objective and real subjective task
93412- * credentials (COW) */
93413- const struct cred __rcu *cred; /* effective (overridable) subjective task
93414- * credentials (COW) */
93415 char comm[TASK_COMM_LEN]; /* executable name excluding path
93416 - access with [gs]et_task_comm (which lock
93417 it with task_lock())
93418@@ -1467,6 +1498,10 @@ struct task_struct {
93419 #endif
93420 /* CPU-specific state of this task */
93421 struct thread_struct thread;
93422+/* thread_info moved to task_struct */
93423+#ifdef CONFIG_X86
93424+ struct thread_info tinfo;
93425+#endif
93426 /* filesystem information */
93427 struct fs_struct *fs;
93428 /* open file information */
93429@@ -1541,6 +1576,10 @@ struct task_struct {
93430 gfp_t lockdep_reclaim_gfp;
93431 #endif
93432
93433+/* process credentials */
93434+ const struct cred __rcu *real_cred; /* objective and real subjective task
93435+ * credentials (COW) */
93436+
93437 /* journalling filesystem info */
93438 void *journal_info;
93439
93440@@ -1579,6 +1618,10 @@ struct task_struct {
93441 /* cg_list protected by css_set_lock and tsk->alloc_lock */
93442 struct list_head cg_list;
93443 #endif
93444+
93445+ const struct cred __rcu *cred; /* effective (overridable) subjective task
93446+ * credentials (COW) */
93447+
93448 #ifdef CONFIG_FUTEX
93449 struct robust_list_head __user *robust_list;
93450 #ifdef CONFIG_COMPAT
93451@@ -1690,7 +1733,7 @@ struct task_struct {
93452 * Number of functions that haven't been traced
93453 * because of depth overrun.
93454 */
93455- atomic_t trace_overrun;
93456+ atomic_unchecked_t trace_overrun;
93457 /* Pause for the tracing */
93458 atomic_t tracing_graph_pause;
93459 #endif
93460@@ -1718,7 +1761,78 @@ struct task_struct {
93461 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
93462 unsigned long task_state_change;
93463 #endif
93464-};
93465+
93466+#ifdef CONFIG_GRKERNSEC
93467+ /* grsecurity */
93468+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
93469+ u64 exec_id;
93470+#endif
93471+#ifdef CONFIG_GRKERNSEC_SETXID
93472+ const struct cred *delayed_cred;
93473+#endif
93474+ struct dentry *gr_chroot_dentry;
93475+ struct acl_subject_label *acl;
93476+ struct acl_subject_label *tmpacl;
93477+ struct acl_role_label *role;
93478+ struct file *exec_file;
93479+ unsigned long brute_expires;
93480+ u16 acl_role_id;
93481+ u8 inherited;
93482+ /* is this the task that authenticated to the special role */
93483+ u8 acl_sp_role;
93484+ u8 is_writable;
93485+ u8 brute;
93486+ u8 gr_is_chrooted;
93487+#endif
93488+
93489+} __randomize_layout;
93490+
93491+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
93492+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
93493+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
93494+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
93495+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
93496+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
93497+
93498+#ifdef CONFIG_PAX_SOFTMODE
93499+extern int pax_softmode;
93500+#endif
93501+
93502+extern int pax_check_flags(unsigned long *);
93503+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
93504+
93505+/* if tsk != current then task_lock must be held on it */
93506+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
93507+static inline unsigned long pax_get_flags(struct task_struct *tsk)
93508+{
93509+ if (likely(tsk->mm))
93510+ return tsk->mm->pax_flags;
93511+ else
93512+ return 0UL;
93513+}
93514+
93515+/* if tsk != current then task_lock must be held on it */
93516+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
93517+{
93518+ if (likely(tsk->mm)) {
93519+ tsk->mm->pax_flags = flags;
93520+ return 0;
93521+ }
93522+ return -EINVAL;
93523+}
93524+#endif
93525+
93526+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
93527+extern void pax_set_initial_flags(struct linux_binprm *bprm);
93528+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
93529+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
93530+#endif
93531+
93532+struct path;
93533+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
93534+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
93535+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
93536+extern void pax_report_refcount_overflow(struct pt_regs *regs);
93537
93538 /* Future-safe accessor for struct task_struct's cpus_allowed. */
93539 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
93540@@ -1801,7 +1915,7 @@ struct pid_namespace;
93541 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
93542 struct pid_namespace *ns);
93543
93544-static inline pid_t task_pid_nr(struct task_struct *tsk)
93545+static inline pid_t task_pid_nr(const struct task_struct *tsk)
93546 {
93547 return tsk->pid;
93548 }
93549@@ -2169,6 +2283,25 @@ extern u64 sched_clock_cpu(int cpu);
93550
93551 extern void sched_clock_init(void);
93552
93553+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
93554+static inline void populate_stack(void)
93555+{
93556+ struct task_struct *curtask = current;
93557+ int c;
93558+ int *ptr = curtask->stack;
93559+ int *end = curtask->stack + THREAD_SIZE;
93560+
93561+ while (ptr < end) {
93562+ c = *(volatile int *)ptr;
93563+ ptr += PAGE_SIZE/sizeof(int);
93564+ }
93565+}
93566+#else
93567+static inline void populate_stack(void)
93568+{
93569+}
93570+#endif
93571+
93572 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
93573 static inline void sched_clock_tick(void)
93574 {
93575@@ -2302,7 +2435,9 @@ void yield(void);
93576 extern struct exec_domain default_exec_domain;
93577
93578 union thread_union {
93579+#ifndef CONFIG_X86
93580 struct thread_info thread_info;
93581+#endif
93582 unsigned long stack[THREAD_SIZE/sizeof(long)];
93583 };
93584
93585@@ -2335,6 +2470,7 @@ extern struct pid_namespace init_pid_ns;
93586 */
93587
93588 extern struct task_struct *find_task_by_vpid(pid_t nr);
93589+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
93590 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
93591 struct pid_namespace *ns);
93592
93593@@ -2499,7 +2635,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
93594 extern void exit_itimers(struct signal_struct *);
93595 extern void flush_itimer_signals(void);
93596
93597-extern void do_group_exit(int);
93598+extern __noreturn void do_group_exit(int);
93599
93600 extern int do_execve(struct filename *,
93601 const char __user * const __user *,
93602@@ -2720,9 +2856,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
93603 #define task_stack_end_corrupted(task) \
93604 (*(end_of_stack(task)) != STACK_END_MAGIC)
93605
93606-static inline int object_is_on_stack(void *obj)
93607+static inline int object_starts_on_stack(const void *obj)
93608 {
93609- void *stack = task_stack_page(current);
93610+ const void *stack = task_stack_page(current);
93611
93612 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
93613 }
93614diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
93615index 596a0e0..bea77ec 100644
93616--- a/include/linux/sched/sysctl.h
93617+++ b/include/linux/sched/sysctl.h
93618@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
93619 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
93620
93621 extern int sysctl_max_map_count;
93622+extern unsigned long sysctl_heap_stack_gap;
93623
93624 extern unsigned int sysctl_sched_latency;
93625 extern unsigned int sysctl_sched_min_granularity;
93626diff --git a/include/linux/security.h b/include/linux/security.h
93627index a1b7dbd..036f47f 100644
93628--- a/include/linux/security.h
93629+++ b/include/linux/security.h
93630@@ -27,6 +27,7 @@
93631 #include <linux/slab.h>
93632 #include <linux/err.h>
93633 #include <linux/string.h>
93634+#include <linux/grsecurity.h>
93635
93636 struct linux_binprm;
93637 struct cred;
93638@@ -116,8 +117,6 @@ struct seq_file;
93639
93640 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
93641
93642-void reset_security_ops(void);
93643-
93644 #ifdef CONFIG_MMU
93645 extern unsigned long mmap_min_addr;
93646 extern unsigned long dac_mmap_min_addr;
93647@@ -1756,7 +1755,7 @@ struct security_operations {
93648 struct audit_context *actx);
93649 void (*audit_rule_free) (void *lsmrule);
93650 #endif /* CONFIG_AUDIT */
93651-};
93652+} __randomize_layout;
93653
93654 /* prototypes */
93655 extern int security_init(void);
93656diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
93657index dc368b8..e895209 100644
93658--- a/include/linux/semaphore.h
93659+++ b/include/linux/semaphore.h
93660@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
93661 }
93662
93663 extern void down(struct semaphore *sem);
93664-extern int __must_check down_interruptible(struct semaphore *sem);
93665+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
93666 extern int __must_check down_killable(struct semaphore *sem);
93667 extern int __must_check down_trylock(struct semaphore *sem);
93668 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
93669diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
93670index afbb1fd..e1d205d 100644
93671--- a/include/linux/seq_file.h
93672+++ b/include/linux/seq_file.h
93673@@ -27,6 +27,9 @@ struct seq_file {
93674 struct mutex lock;
93675 const struct seq_operations *op;
93676 int poll_event;
93677+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
93678+ u64 exec_id;
93679+#endif
93680 #ifdef CONFIG_USER_NS
93681 struct user_namespace *user_ns;
93682 #endif
93683@@ -39,6 +42,7 @@ struct seq_operations {
93684 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
93685 int (*show) (struct seq_file *m, void *v);
93686 };
93687+typedef struct seq_operations __no_const seq_operations_no_const;
93688
93689 #define SEQ_SKIP 1
93690
93691@@ -111,6 +115,7 @@ void seq_pad(struct seq_file *m, char c);
93692
93693 char *mangle_path(char *s, const char *p, const char *esc);
93694 int seq_open(struct file *, const struct seq_operations *);
93695+int seq_open_restrict(struct file *, const struct seq_operations *);
93696 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
93697 loff_t seq_lseek(struct file *, loff_t, int);
93698 int seq_release(struct inode *, struct file *);
93699@@ -128,6 +133,7 @@ int seq_path_root(struct seq_file *m, const struct path *path,
93700 const struct path *root, const char *esc);
93701
93702 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
93703+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
93704 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
93705 int single_release(struct inode *, struct file *);
93706 void *__seq_open_private(struct file *, const struct seq_operations *, int);
93707diff --git a/include/linux/shm.h b/include/linux/shm.h
93708index 6fb8016..ab4465e 100644
93709--- a/include/linux/shm.h
93710+++ b/include/linux/shm.h
93711@@ -22,6 +22,10 @@ struct shmid_kernel /* private to the kernel */
93712 /* The task created the shm object. NULL if the task is dead. */
93713 struct task_struct *shm_creator;
93714 struct list_head shm_clist; /* list by creator */
93715+#ifdef CONFIG_GRKERNSEC
93716+ u64 shm_createtime;
93717+ pid_t shm_lapid;
93718+#endif
93719 };
93720
93721 /* shm_mode upper byte flags */
93722diff --git a/include/linux/signal.h b/include/linux/signal.h
93723index ab1e039..ad4229e 100644
93724--- a/include/linux/signal.h
93725+++ b/include/linux/signal.h
93726@@ -289,7 +289,7 @@ static inline void allow_signal(int sig)
93727 * know it'll be handled, so that they don't get converted to
93728 * SIGKILL or just silently dropped.
93729 */
93730- kernel_sigaction(sig, (__force __sighandler_t)2);
93731+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
93732 }
93733
93734 static inline void disallow_signal(int sig)
93735diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
93736index bdccc4b..e9f8670 100644
93737--- a/include/linux/skbuff.h
93738+++ b/include/linux/skbuff.h
93739@@ -771,7 +771,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
93740 int node);
93741 struct sk_buff *__build_skb(void *data, unsigned int frag_size);
93742 struct sk_buff *build_skb(void *data, unsigned int frag_size);
93743-static inline struct sk_buff *alloc_skb(unsigned int size,
93744+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
93745 gfp_t priority)
93746 {
93747 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
93748@@ -1967,7 +1967,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
93749 return skb->inner_transport_header - skb->inner_network_header;
93750 }
93751
93752-static inline int skb_network_offset(const struct sk_buff *skb)
93753+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
93754 {
93755 return skb_network_header(skb) - skb->data;
93756 }
93757@@ -2027,7 +2027,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
93758 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
93759 */
93760 #ifndef NET_SKB_PAD
93761-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
93762+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
93763 #endif
93764
93765 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
93766@@ -2669,9 +2669,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
93767 int *err);
93768 unsigned int datagram_poll(struct file *file, struct socket *sock,
93769 struct poll_table_struct *wait);
93770-int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
93771+int __intentional_overflow(0) skb_copy_datagram_iter(const struct sk_buff *from, int offset,
93772 struct iov_iter *to, int size);
93773-static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
93774+static inline int __intentional_overflow(2,4) skb_copy_datagram_msg(const struct sk_buff *from, int offset,
93775 struct msghdr *msg, int size)
93776 {
93777 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
93778@@ -3193,6 +3193,9 @@ static inline void nf_reset(struct sk_buff *skb)
93779 nf_bridge_put(skb->nf_bridge);
93780 skb->nf_bridge = NULL;
93781 #endif
93782+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
93783+ skb->nf_trace = 0;
93784+#endif
93785 }
93786
93787 static inline void nf_reset_trace(struct sk_buff *skb)
93788diff --git a/include/linux/slab.h b/include/linux/slab.h
93789index 76f1fee..d95e6d2 100644
93790--- a/include/linux/slab.h
93791+++ b/include/linux/slab.h
93792@@ -14,15 +14,29 @@
93793 #include <linux/gfp.h>
93794 #include <linux/types.h>
93795 #include <linux/workqueue.h>
93796-
93797+#include <linux/err.h>
93798
93799 /*
93800 * Flags to pass to kmem_cache_create().
93801 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
93802 */
93803 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
93804+
93805+#ifdef CONFIG_PAX_USERCOPY_SLABS
93806+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
93807+#else
93808+#define SLAB_USERCOPY 0x00000000UL
93809+#endif
93810+
93811 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
93812 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
93813+
93814+#ifdef CONFIG_PAX_MEMORY_SANITIZE
93815+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
93816+#else
93817+#define SLAB_NO_SANITIZE 0x00000000UL
93818+#endif
93819+
93820 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
93821 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
93822 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
93823@@ -98,10 +112,13 @@
93824 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
93825 * Both make kfree a no-op.
93826 */
93827-#define ZERO_SIZE_PTR ((void *)16)
93828+#define ZERO_SIZE_PTR \
93829+({ \
93830+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
93831+ (void *)(-MAX_ERRNO-1L); \
93832+})
93833
93834-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
93835- (unsigned long)ZERO_SIZE_PTR)
93836+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
93837
93838 #include <linux/kmemleak.h>
93839 #include <linux/kasan.h>
93840@@ -143,6 +160,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
93841 void kfree(const void *);
93842 void kzfree(const void *);
93843 size_t ksize(const void *);
93844+const char *check_heap_object(const void *ptr, unsigned long n);
93845+bool is_usercopy_object(const void *ptr);
93846
93847 /*
93848 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
93849@@ -235,6 +254,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
93850 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
93851 #endif
93852
93853+#ifdef CONFIG_PAX_USERCOPY_SLABS
93854+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
93855+#endif
93856+
93857 /*
93858 * Figure out which kmalloc slab an allocation of a certain size
93859 * belongs to.
93860@@ -243,7 +266,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
93861 * 2 = 120 .. 192 bytes
93862 * n = 2^(n-1) .. 2^n -1
93863 */
93864-static __always_inline int kmalloc_index(size_t size)
93865+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
93866 {
93867 if (!size)
93868 return 0;
93869@@ -286,15 +309,15 @@ static __always_inline int kmalloc_index(size_t size)
93870 }
93871 #endif /* !CONFIG_SLOB */
93872
93873-void *__kmalloc(size_t size, gfp_t flags);
93874+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
93875 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
93876 void kmem_cache_free(struct kmem_cache *, void *);
93877
93878 #ifdef CONFIG_NUMA
93879-void *__kmalloc_node(size_t size, gfp_t flags, int node);
93880+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1) __size_overflow(1);
93881 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
93882 #else
93883-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
93884+static __always_inline void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
93885 {
93886 return __kmalloc(size, flags);
93887 }
93888diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
93889index 33d0490..70a6313 100644
93890--- a/include/linux/slab_def.h
93891+++ b/include/linux/slab_def.h
93892@@ -40,7 +40,7 @@ struct kmem_cache {
93893 /* 4) cache creation/removal */
93894 const char *name;
93895 struct list_head list;
93896- int refcount;
93897+ atomic_t refcount;
93898 int object_size;
93899 int align;
93900
93901@@ -56,10 +56,14 @@ struct kmem_cache {
93902 unsigned long node_allocs;
93903 unsigned long node_frees;
93904 unsigned long node_overflow;
93905- atomic_t allochit;
93906- atomic_t allocmiss;
93907- atomic_t freehit;
93908- atomic_t freemiss;
93909+ atomic_unchecked_t allochit;
93910+ atomic_unchecked_t allocmiss;
93911+ atomic_unchecked_t freehit;
93912+ atomic_unchecked_t freemiss;
93913+#ifdef CONFIG_PAX_MEMORY_SANITIZE
93914+ atomic_unchecked_t sanitized;
93915+ atomic_unchecked_t not_sanitized;
93916+#endif
93917
93918 /*
93919 * If debugging is enabled, then the allocator can add additional
93920diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
93921index 3388511..6252f90 100644
93922--- a/include/linux/slub_def.h
93923+++ b/include/linux/slub_def.h
93924@@ -74,7 +74,7 @@ struct kmem_cache {
93925 struct kmem_cache_order_objects max;
93926 struct kmem_cache_order_objects min;
93927 gfp_t allocflags; /* gfp flags to use on each alloc */
93928- int refcount; /* Refcount for slab cache destroy */
93929+ atomic_t refcount; /* Refcount for slab cache destroy */
93930 void (*ctor)(void *);
93931 int inuse; /* Offset to metadata */
93932 int align; /* Alignment */
93933diff --git a/include/linux/smp.h b/include/linux/smp.h
93934index be91db2..3f23232 100644
93935--- a/include/linux/smp.h
93936+++ b/include/linux/smp.h
93937@@ -183,7 +183,9 @@ static inline void smp_init(void) { }
93938 #endif
93939
93940 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
93941+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
93942 #define put_cpu() preempt_enable()
93943+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
93944
93945 /*
93946 * Callback to arch code if there's nosmp or maxcpus=0 on the
93947diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
93948index 46cca4c..3323536 100644
93949--- a/include/linux/sock_diag.h
93950+++ b/include/linux/sock_diag.h
93951@@ -11,7 +11,7 @@ struct sock;
93952 struct sock_diag_handler {
93953 __u8 family;
93954 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
93955-};
93956+} __do_const;
93957
93958 int sock_diag_register(const struct sock_diag_handler *h);
93959 void sock_diag_unregister(const struct sock_diag_handler *h);
93960diff --git a/include/linux/sonet.h b/include/linux/sonet.h
93961index 680f9a3..f13aeb0 100644
93962--- a/include/linux/sonet.h
93963+++ b/include/linux/sonet.h
93964@@ -7,7 +7,7 @@
93965 #include <uapi/linux/sonet.h>
93966
93967 struct k_sonet_stats {
93968-#define __HANDLE_ITEM(i) atomic_t i
93969+#define __HANDLE_ITEM(i) atomic_unchecked_t i
93970 __SONET_ITEMS
93971 #undef __HANDLE_ITEM
93972 };
93973diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
93974index 07d8e53..dc934c9 100644
93975--- a/include/linux/sunrpc/addr.h
93976+++ b/include/linux/sunrpc/addr.h
93977@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
93978 {
93979 switch (sap->sa_family) {
93980 case AF_INET:
93981- return ntohs(((struct sockaddr_in *)sap)->sin_port);
93982+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
93983 case AF_INET6:
93984- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
93985+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
93986 }
93987 return 0;
93988 }
93989@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
93990 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
93991 const struct sockaddr *src)
93992 {
93993- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
93994+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
93995 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
93996
93997 dsin->sin_family = ssin->sin_family;
93998@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
93999 if (sa->sa_family != AF_INET6)
94000 return 0;
94001
94002- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
94003+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
94004 }
94005
94006 #endif /* _LINUX_SUNRPC_ADDR_H */
94007diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
94008index 598ba80..d90cba6 100644
94009--- a/include/linux/sunrpc/clnt.h
94010+++ b/include/linux/sunrpc/clnt.h
94011@@ -100,7 +100,7 @@ struct rpc_procinfo {
94012 unsigned int p_timer; /* Which RTT timer to use */
94013 u32 p_statidx; /* Which procedure to account */
94014 const char * p_name; /* name of procedure */
94015-};
94016+} __do_const;
94017
94018 #ifdef __KERNEL__
94019
94020diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
94021index fae6fb9..023fbcd 100644
94022--- a/include/linux/sunrpc/svc.h
94023+++ b/include/linux/sunrpc/svc.h
94024@@ -420,7 +420,7 @@ struct svc_procedure {
94025 unsigned int pc_count; /* call count */
94026 unsigned int pc_cachetype; /* cache info (NFS) */
94027 unsigned int pc_xdrressize; /* maximum size of XDR reply */
94028-};
94029+} __do_const;
94030
94031 /*
94032 * Function prototypes.
94033diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
94034index df8edf8..d140fec 100644
94035--- a/include/linux/sunrpc/svc_rdma.h
94036+++ b/include/linux/sunrpc/svc_rdma.h
94037@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
94038 extern unsigned int svcrdma_max_requests;
94039 extern unsigned int svcrdma_max_req_size;
94040
94041-extern atomic_t rdma_stat_recv;
94042-extern atomic_t rdma_stat_read;
94043-extern atomic_t rdma_stat_write;
94044-extern atomic_t rdma_stat_sq_starve;
94045-extern atomic_t rdma_stat_rq_starve;
94046-extern atomic_t rdma_stat_rq_poll;
94047-extern atomic_t rdma_stat_rq_prod;
94048-extern atomic_t rdma_stat_sq_poll;
94049-extern atomic_t rdma_stat_sq_prod;
94050+extern atomic_unchecked_t rdma_stat_recv;
94051+extern atomic_unchecked_t rdma_stat_read;
94052+extern atomic_unchecked_t rdma_stat_write;
94053+extern atomic_unchecked_t rdma_stat_sq_starve;
94054+extern atomic_unchecked_t rdma_stat_rq_starve;
94055+extern atomic_unchecked_t rdma_stat_rq_poll;
94056+extern atomic_unchecked_t rdma_stat_rq_prod;
94057+extern atomic_unchecked_t rdma_stat_sq_poll;
94058+extern atomic_unchecked_t rdma_stat_sq_prod;
94059
94060 /*
94061 * Contexts are built when an RDMA request is created and are a
94062diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
94063index 8d71d65..f79586e 100644
94064--- a/include/linux/sunrpc/svcauth.h
94065+++ b/include/linux/sunrpc/svcauth.h
94066@@ -120,7 +120,7 @@ struct auth_ops {
94067 int (*release)(struct svc_rqst *rq);
94068 void (*domain_release)(struct auth_domain *);
94069 int (*set_client)(struct svc_rqst *rq);
94070-};
94071+} __do_const;
94072
94073 #define SVC_GARBAGE 1
94074 #define SVC_SYSERR 2
94075diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
94076index e7a018e..49f8b17 100644
94077--- a/include/linux/swiotlb.h
94078+++ b/include/linux/swiotlb.h
94079@@ -60,7 +60,8 @@ extern void
94080
94081 extern void
94082 swiotlb_free_coherent(struct device *hwdev, size_t size,
94083- void *vaddr, dma_addr_t dma_handle);
94084+ void *vaddr, dma_addr_t dma_handle,
94085+ struct dma_attrs *attrs);
94086
94087 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
94088 unsigned long offset, size_t size,
94089diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
94090index 76d1e38..200776e 100644
94091--- a/include/linux/syscalls.h
94092+++ b/include/linux/syscalls.h
94093@@ -102,7 +102,12 @@ union bpf_attr;
94094 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
94095 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
94096 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
94097-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
94098+#define __SC_LONG(t, a) __typeof__( \
94099+ __builtin_choose_expr( \
94100+ sizeof(t) > sizeof(int), \
94101+ (t) 0, \
94102+ __builtin_choose_expr(__type_is_unsigned(t), 0UL, 0L) \
94103+ )) a
94104 #define __SC_CAST(t, a) (t) a
94105 #define __SC_ARGS(t, a) a
94106 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
94107@@ -384,11 +389,11 @@ asmlinkage long sys_sync(void);
94108 asmlinkage long sys_fsync(unsigned int fd);
94109 asmlinkage long sys_fdatasync(unsigned int fd);
94110 asmlinkage long sys_bdflush(int func, long data);
94111-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
94112- char __user *type, unsigned long flags,
94113+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
94114+ const char __user *type, unsigned long flags,
94115 void __user *data);
94116-asmlinkage long sys_umount(char __user *name, int flags);
94117-asmlinkage long sys_oldumount(char __user *name);
94118+asmlinkage long sys_umount(const char __user *name, int flags);
94119+asmlinkage long sys_oldumount(const char __user *name);
94120 asmlinkage long sys_truncate(const char __user *path, long length);
94121 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
94122 asmlinkage long sys_stat(const char __user *filename,
94123@@ -604,7 +609,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
94124 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
94125 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
94126 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
94127- struct sockaddr __user *, int);
94128+ struct sockaddr __user *, int) __intentional_overflow(0);
94129 asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
94130 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
94131 unsigned int vlen, unsigned flags);
94132@@ -663,10 +668,10 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
94133
94134 asmlinkage long sys_semget(key_t key, int nsems, int semflg);
94135 asmlinkage long sys_semop(int semid, struct sembuf __user *sops,
94136- unsigned nsops);
94137+ long nsops);
94138 asmlinkage long sys_semctl(int semid, int semnum, int cmd, unsigned long arg);
94139 asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops,
94140- unsigned nsops,
94141+ long nsops,
94142 const struct timespec __user *timeout);
94143 asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg);
94144 asmlinkage long sys_shmget(key_t key, size_t size, int flag);
94145diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
94146index 27b3b0b..e093dd9 100644
94147--- a/include/linux/syscore_ops.h
94148+++ b/include/linux/syscore_ops.h
94149@@ -16,7 +16,7 @@ struct syscore_ops {
94150 int (*suspend)(void);
94151 void (*resume)(void);
94152 void (*shutdown)(void);
94153-};
94154+} __do_const;
94155
94156 extern void register_syscore_ops(struct syscore_ops *ops);
94157 extern void unregister_syscore_ops(struct syscore_ops *ops);
94158diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
94159index b7361f8..341a15a 100644
94160--- a/include/linux/sysctl.h
94161+++ b/include/linux/sysctl.h
94162@@ -39,6 +39,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
94163
94164 extern int proc_dostring(struct ctl_table *, int,
94165 void __user *, size_t *, loff_t *);
94166+extern int proc_dostring_modpriv(struct ctl_table *, int,
94167+ void __user *, size_t *, loff_t *);
94168 extern int proc_dointvec(struct ctl_table *, int,
94169 void __user *, size_t *, loff_t *);
94170 extern int proc_dointvec_minmax(struct ctl_table *, int,
94171@@ -113,7 +115,8 @@ struct ctl_table
94172 struct ctl_table_poll *poll;
94173 void *extra1;
94174 void *extra2;
94175-};
94176+} __do_const __randomize_layout;
94177+typedef struct ctl_table __no_const ctl_table_no_const;
94178
94179 struct ctl_node {
94180 struct rb_node node;
94181diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
94182index ddad161..a3efd26 100644
94183--- a/include/linux/sysfs.h
94184+++ b/include/linux/sysfs.h
94185@@ -34,7 +34,8 @@ struct attribute {
94186 struct lock_class_key *key;
94187 struct lock_class_key skey;
94188 #endif
94189-};
94190+} __do_const;
94191+typedef struct attribute __no_const attribute_no_const;
94192
94193 /**
94194 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
94195@@ -63,7 +64,8 @@ struct attribute_group {
94196 struct attribute *, int);
94197 struct attribute **attrs;
94198 struct bin_attribute **bin_attrs;
94199-};
94200+} __do_const;
94201+typedef struct attribute_group __no_const attribute_group_no_const;
94202
94203 /**
94204 * Use these macros to make defining attributes easier. See include/linux/device.h
94205@@ -137,7 +139,8 @@ struct bin_attribute {
94206 char *, loff_t, size_t);
94207 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
94208 struct vm_area_struct *vma);
94209-};
94210+} __do_const;
94211+typedef struct bin_attribute __no_const bin_attribute_no_const;
94212
94213 /**
94214 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
94215diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
94216index 387fa7d..3fcde6b 100644
94217--- a/include/linux/sysrq.h
94218+++ b/include/linux/sysrq.h
94219@@ -16,6 +16,7 @@
94220
94221 #include <linux/errno.h>
94222 #include <linux/types.h>
94223+#include <linux/compiler.h>
94224
94225 /* Possible values of bitmask for enabling sysrq functions */
94226 /* 0x0001 is reserved for enable everything */
94227@@ -33,7 +34,7 @@ struct sysrq_key_op {
94228 char *help_msg;
94229 char *action_msg;
94230 int enable_mask;
94231-};
94232+} __do_const;
94233
94234 #ifdef CONFIG_MAGIC_SYSRQ
94235
94236diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
94237index ff307b5..f1a4468 100644
94238--- a/include/linux/thread_info.h
94239+++ b/include/linux/thread_info.h
94240@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
94241 #error "no set_restore_sigmask() provided and default one won't work"
94242 #endif
94243
94244+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
94245+
94246+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
94247+{
94248+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
94249+}
94250+
94251 #endif /* __KERNEL__ */
94252
94253 #endif /* _LINUX_THREAD_INFO_H */
94254diff --git a/include/linux/tty.h b/include/linux/tty.h
94255index 790752a..36d9b54 100644
94256--- a/include/linux/tty.h
94257+++ b/include/linux/tty.h
94258@@ -225,7 +225,7 @@ struct tty_port {
94259 const struct tty_port_operations *ops; /* Port operations */
94260 spinlock_t lock; /* Lock protecting tty field */
94261 int blocked_open; /* Waiting to open */
94262- int count; /* Usage count */
94263+ atomic_t count; /* Usage count */
94264 wait_queue_head_t open_wait; /* Open waiters */
94265 wait_queue_head_t close_wait; /* Close waiters */
94266 wait_queue_head_t delta_msr_wait; /* Modem status change */
94267@@ -313,7 +313,7 @@ struct tty_struct {
94268 /* If the tty has a pending do_SAK, queue it here - akpm */
94269 struct work_struct SAK_work;
94270 struct tty_port *port;
94271-};
94272+} __randomize_layout;
94273
94274 /* Each of a tty's open files has private_data pointing to tty_file_private */
94275 struct tty_file_private {
94276@@ -572,7 +572,7 @@ extern int tty_port_open(struct tty_port *port,
94277 struct tty_struct *tty, struct file *filp);
94278 static inline int tty_port_users(struct tty_port *port)
94279 {
94280- return port->count + port->blocked_open;
94281+ return atomic_read(&port->count) + port->blocked_open;
94282 }
94283
94284 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
94285diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
94286index 92e337c..f46757b 100644
94287--- a/include/linux/tty_driver.h
94288+++ b/include/linux/tty_driver.h
94289@@ -291,7 +291,7 @@ struct tty_operations {
94290 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
94291 #endif
94292 const struct file_operations *proc_fops;
94293-};
94294+} __do_const __randomize_layout;
94295
94296 struct tty_driver {
94297 int magic; /* magic number for this structure */
94298@@ -325,7 +325,7 @@ struct tty_driver {
94299
94300 const struct tty_operations *ops;
94301 struct list_head tty_drivers;
94302-};
94303+} __randomize_layout;
94304
94305 extern struct list_head tty_drivers;
94306
94307diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
94308index 00c9d68..bc0188b 100644
94309--- a/include/linux/tty_ldisc.h
94310+++ b/include/linux/tty_ldisc.h
94311@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
94312
94313 struct module *owner;
94314
94315- int refcount;
94316+ atomic_t refcount;
94317 };
94318
94319 struct tty_ldisc {
94320diff --git a/include/linux/types.h b/include/linux/types.h
94321index 6747247..fc7ec8b 100644
94322--- a/include/linux/types.h
94323+++ b/include/linux/types.h
94324@@ -174,10 +174,26 @@ typedef struct {
94325 int counter;
94326 } atomic_t;
94327
94328+#ifdef CONFIG_PAX_REFCOUNT
94329+typedef struct {
94330+ int counter;
94331+} atomic_unchecked_t;
94332+#else
94333+typedef atomic_t atomic_unchecked_t;
94334+#endif
94335+
94336 #ifdef CONFIG_64BIT
94337 typedef struct {
94338 long counter;
94339 } atomic64_t;
94340+
94341+#ifdef CONFIG_PAX_REFCOUNT
94342+typedef struct {
94343+ long counter;
94344+} atomic64_unchecked_t;
94345+#else
94346+typedef atomic64_t atomic64_unchecked_t;
94347+#endif
94348 #endif
94349
94350 struct list_head {
94351diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
94352index ecd3319..8a36ded 100644
94353--- a/include/linux/uaccess.h
94354+++ b/include/linux/uaccess.h
94355@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
94356 long ret; \
94357 mm_segment_t old_fs = get_fs(); \
94358 \
94359- set_fs(KERNEL_DS); \
94360 pagefault_disable(); \
94361- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
94362- pagefault_enable(); \
94363+ set_fs(KERNEL_DS); \
94364+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
94365 set_fs(old_fs); \
94366+ pagefault_enable(); \
94367 ret; \
94368 })
94369
94370diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
94371index 2d1f9b6..d7a9fce 100644
94372--- a/include/linux/uidgid.h
94373+++ b/include/linux/uidgid.h
94374@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
94375
94376 #endif /* CONFIG_USER_NS */
94377
94378+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
94379+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
94380+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
94381+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
94382+
94383 #endif /* _LINUX_UIDGID_H */
94384diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
94385index 32c0e83..671eb35 100644
94386--- a/include/linux/uio_driver.h
94387+++ b/include/linux/uio_driver.h
94388@@ -67,7 +67,7 @@ struct uio_device {
94389 struct module *owner;
94390 struct device *dev;
94391 int minor;
94392- atomic_t event;
94393+ atomic_unchecked_t event;
94394 struct fasync_struct *async_queue;
94395 wait_queue_head_t wait;
94396 struct uio_info *info;
94397diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
94398index 99c1b4d..562e6f3 100644
94399--- a/include/linux/unaligned/access_ok.h
94400+++ b/include/linux/unaligned/access_ok.h
94401@@ -4,34 +4,34 @@
94402 #include <linux/kernel.h>
94403 #include <asm/byteorder.h>
94404
94405-static inline u16 get_unaligned_le16(const void *p)
94406+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
94407 {
94408- return le16_to_cpup((__le16 *)p);
94409+ return le16_to_cpup((const __le16 *)p);
94410 }
94411
94412-static inline u32 get_unaligned_le32(const void *p)
94413+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
94414 {
94415- return le32_to_cpup((__le32 *)p);
94416+ return le32_to_cpup((const __le32 *)p);
94417 }
94418
94419-static inline u64 get_unaligned_le64(const void *p)
94420+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
94421 {
94422- return le64_to_cpup((__le64 *)p);
94423+ return le64_to_cpup((const __le64 *)p);
94424 }
94425
94426-static inline u16 get_unaligned_be16(const void *p)
94427+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
94428 {
94429- return be16_to_cpup((__be16 *)p);
94430+ return be16_to_cpup((const __be16 *)p);
94431 }
94432
94433-static inline u32 get_unaligned_be32(const void *p)
94434+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
94435 {
94436- return be32_to_cpup((__be32 *)p);
94437+ return be32_to_cpup((const __be32 *)p);
94438 }
94439
94440-static inline u64 get_unaligned_be64(const void *p)
94441+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
94442 {
94443- return be64_to_cpup((__be64 *)p);
94444+ return be64_to_cpup((const __be64 *)p);
94445 }
94446
94447 static inline void put_unaligned_le16(u16 val, void *p)
94448diff --git a/include/linux/usb.h b/include/linux/usb.h
94449index 447fe29..9fc875f 100644
94450--- a/include/linux/usb.h
94451+++ b/include/linux/usb.h
94452@@ -592,7 +592,7 @@ struct usb_device {
94453 int maxchild;
94454
94455 u32 quirks;
94456- atomic_t urbnum;
94457+ atomic_unchecked_t urbnum;
94458
94459 unsigned long active_duration;
94460
94461@@ -1676,7 +1676,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
94462
94463 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
94464 __u8 request, __u8 requesttype, __u16 value, __u16 index,
94465- void *data, __u16 size, int timeout);
94466+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
94467 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
94468 void *data, int len, int *actual_length, int timeout);
94469 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
94470diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
94471index 9fd9e48..e2c5f35 100644
94472--- a/include/linux/usb/renesas_usbhs.h
94473+++ b/include/linux/usb/renesas_usbhs.h
94474@@ -39,7 +39,7 @@ enum {
94475 */
94476 struct renesas_usbhs_driver_callback {
94477 int (*notify_hotplug)(struct platform_device *pdev);
94478-};
94479+} __no_const;
94480
94481 /*
94482 * callback functions for platform
94483diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
94484index 8297e5b..0dfae27 100644
94485--- a/include/linux/user_namespace.h
94486+++ b/include/linux/user_namespace.h
94487@@ -39,7 +39,7 @@ struct user_namespace {
94488 struct key *persistent_keyring_register;
94489 struct rw_semaphore persistent_keyring_register_sem;
94490 #endif
94491-};
94492+} __randomize_layout;
94493
94494 extern struct user_namespace init_user_ns;
94495
94496diff --git a/include/linux/utsname.h b/include/linux/utsname.h
94497index 5093f58..c103e58 100644
94498--- a/include/linux/utsname.h
94499+++ b/include/linux/utsname.h
94500@@ -25,7 +25,7 @@ struct uts_namespace {
94501 struct new_utsname name;
94502 struct user_namespace *user_ns;
94503 struct ns_common ns;
94504-};
94505+} __randomize_layout;
94506 extern struct uts_namespace init_uts_ns;
94507
94508 #ifdef CONFIG_UTS_NS
94509diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
94510index 6f8fbcf..4efc177 100644
94511--- a/include/linux/vermagic.h
94512+++ b/include/linux/vermagic.h
94513@@ -25,9 +25,42 @@
94514 #define MODULE_ARCH_VERMAGIC ""
94515 #endif
94516
94517+#ifdef CONFIG_PAX_REFCOUNT
94518+#define MODULE_PAX_REFCOUNT "REFCOUNT "
94519+#else
94520+#define MODULE_PAX_REFCOUNT ""
94521+#endif
94522+
94523+#ifdef CONSTIFY_PLUGIN
94524+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
94525+#else
94526+#define MODULE_CONSTIFY_PLUGIN ""
94527+#endif
94528+
94529+#ifdef STACKLEAK_PLUGIN
94530+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
94531+#else
94532+#define MODULE_STACKLEAK_PLUGIN ""
94533+#endif
94534+
94535+#ifdef RANDSTRUCT_PLUGIN
94536+#include <generated/randomize_layout_hash.h>
94537+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
94538+#else
94539+#define MODULE_RANDSTRUCT_PLUGIN
94540+#endif
94541+
94542+#ifdef CONFIG_GRKERNSEC
94543+#define MODULE_GRSEC "GRSEC "
94544+#else
94545+#define MODULE_GRSEC ""
94546+#endif
94547+
94548 #define VERMAGIC_STRING \
94549 UTS_RELEASE " " \
94550 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
94551 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
94552- MODULE_ARCH_VERMAGIC
94553+ MODULE_ARCH_VERMAGIC \
94554+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
94555+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
94556
94557diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
94558index b483abd..af305ad 100644
94559--- a/include/linux/vga_switcheroo.h
94560+++ b/include/linux/vga_switcheroo.h
94561@@ -63,9 +63,9 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
94562
94563 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
94564
94565-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
94566+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
94567 void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
94568-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
94569+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
94570 #else
94571
94572 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
94573@@ -82,9 +82,9 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
94574
94575 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
94576
94577-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
94578+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
94579 static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
94580-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
94581+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
94582
94583 #endif
94584 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
94585diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
94586index 0ec5983..cc61051 100644
94587--- a/include/linux/vmalloc.h
94588+++ b/include/linux/vmalloc.h
94589@@ -18,6 +18,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
94590 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
94591 #define VM_NO_GUARD 0x00000040 /* don't add guard page */
94592 #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
94593+
94594+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
94595+#define VM_KERNEXEC 0x00000100 /* allocate from executable kernel memory range */
94596+#endif
94597+
94598 /* bits [20..32] reserved for arch specific ioremap internals */
94599
94600 /*
94601@@ -86,6 +91,10 @@ extern void *vmap(struct page **pages, unsigned int count,
94602 unsigned long flags, pgprot_t prot);
94603 extern void vunmap(const void *addr);
94604
94605+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
94606+extern void unmap_process_stacks(struct task_struct *task);
94607+#endif
94608+
94609 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
94610 unsigned long uaddr, void *kaddr,
94611 unsigned long size);
94612@@ -150,7 +159,7 @@ extern void free_vm_area(struct vm_struct *area);
94613
94614 /* for /dev/kmem */
94615 extern long vread(char *buf, char *addr, unsigned long count);
94616-extern long vwrite(char *buf, char *addr, unsigned long count);
94617+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
94618
94619 /*
94620 * Internals. Dont't use..
94621diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
94622index 82e7db7..f8ce3d0 100644
94623--- a/include/linux/vmstat.h
94624+++ b/include/linux/vmstat.h
94625@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
94626 /*
94627 * Zone based page accounting with per cpu differentials.
94628 */
94629-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
94630+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
94631
94632 static inline void zone_page_state_add(long x, struct zone *zone,
94633 enum zone_stat_item item)
94634 {
94635- atomic_long_add(x, &zone->vm_stat[item]);
94636- atomic_long_add(x, &vm_stat[item]);
94637+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
94638+ atomic_long_add_unchecked(x, &vm_stat[item]);
94639 }
94640
94641-static inline unsigned long global_page_state(enum zone_stat_item item)
94642+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
94643 {
94644- long x = atomic_long_read(&vm_stat[item]);
94645+ long x = atomic_long_read_unchecked(&vm_stat[item]);
94646 #ifdef CONFIG_SMP
94647 if (x < 0)
94648 x = 0;
94649@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
94650 return x;
94651 }
94652
94653-static inline unsigned long zone_page_state(struct zone *zone,
94654+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
94655 enum zone_stat_item item)
94656 {
94657- long x = atomic_long_read(&zone->vm_stat[item]);
94658+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
94659 #ifdef CONFIG_SMP
94660 if (x < 0)
94661 x = 0;
94662@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
94663 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
94664 enum zone_stat_item item)
94665 {
94666- long x = atomic_long_read(&zone->vm_stat[item]);
94667+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
94668
94669 #ifdef CONFIG_SMP
94670 int cpu;
94671@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
94672
94673 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
94674 {
94675- atomic_long_inc(&zone->vm_stat[item]);
94676- atomic_long_inc(&vm_stat[item]);
94677+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
94678+ atomic_long_inc_unchecked(&vm_stat[item]);
94679 }
94680
94681 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
94682 {
94683- atomic_long_dec(&zone->vm_stat[item]);
94684- atomic_long_dec(&vm_stat[item]);
94685+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
94686+ atomic_long_dec_unchecked(&vm_stat[item]);
94687 }
94688
94689 static inline void __inc_zone_page_state(struct page *page,
94690diff --git a/include/linux/xattr.h b/include/linux/xattr.h
94691index 91b0a68..0e9adf6 100644
94692--- a/include/linux/xattr.h
94693+++ b/include/linux/xattr.h
94694@@ -28,7 +28,7 @@ struct xattr_handler {
94695 size_t size, int handler_flags);
94696 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
94697 size_t size, int flags, int handler_flags);
94698-};
94699+} __do_const;
94700
94701 struct xattr {
94702 const char *name;
94703@@ -37,6 +37,9 @@ struct xattr {
94704 };
94705
94706 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
94707+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
94708+ssize_t pax_getxattr(struct dentry *, void *, size_t);
94709+#endif
94710 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
94711 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
94712 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
94713diff --git a/include/linux/zlib.h b/include/linux/zlib.h
94714index 92dbbd3..13ab0b3 100644
94715--- a/include/linux/zlib.h
94716+++ b/include/linux/zlib.h
94717@@ -31,6 +31,7 @@
94718 #define _ZLIB_H
94719
94720 #include <linux/zconf.h>
94721+#include <linux/compiler.h>
94722
94723 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
94724 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
94725@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
94726
94727 /* basic functions */
94728
94729-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
94730+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
94731 /*
94732 Returns the number of bytes that needs to be allocated for a per-
94733 stream workspace with the specified parameters. A pointer to this
94734diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
94735index 3e4fddf..5ec9104 100644
94736--- a/include/media/v4l2-dev.h
94737+++ b/include/media/v4l2-dev.h
94738@@ -75,7 +75,7 @@ struct v4l2_file_operations {
94739 int (*mmap) (struct file *, struct vm_area_struct *);
94740 int (*open) (struct file *);
94741 int (*release) (struct file *);
94742-};
94743+} __do_const;
94744
94745 /*
94746 * Newer version of video_device, handled by videodev2.c
94747diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
94748index ffb69da..040393e 100644
94749--- a/include/media/v4l2-device.h
94750+++ b/include/media/v4l2-device.h
94751@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
94752 this function returns 0. If the name ends with a digit (e.g. cx18),
94753 then the name will be set to cx18-0 since cx180 looks really odd. */
94754 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
94755- atomic_t *instance);
94756+ atomic_unchecked_t *instance);
94757
94758 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
94759 Since the parent disappears this ensures that v4l2_dev doesn't have an
94760diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
94761index 2a25dec..bf6dd8a 100644
94762--- a/include/net/9p/transport.h
94763+++ b/include/net/9p/transport.h
94764@@ -62,7 +62,7 @@ struct p9_trans_module {
94765 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
94766 int (*zc_request)(struct p9_client *, struct p9_req_t *,
94767 char *, char *, int , int, int, int);
94768-};
94769+} __do_const;
94770
94771 void v9fs_register_trans(struct p9_trans_module *m);
94772 void v9fs_unregister_trans(struct p9_trans_module *m);
94773diff --git a/include/net/af_unix.h b/include/net/af_unix.h
94774index a175ba4..196eb8242 100644
94775--- a/include/net/af_unix.h
94776+++ b/include/net/af_unix.h
94777@@ -36,7 +36,7 @@ struct unix_skb_parms {
94778 u32 secid; /* Security ID */
94779 #endif
94780 u32 consumed;
94781-};
94782+} __randomize_layout;
94783
94784 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
94785 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
94786diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
94787index 2239a37..a83461f 100644
94788--- a/include/net/bluetooth/l2cap.h
94789+++ b/include/net/bluetooth/l2cap.h
94790@@ -609,7 +609,7 @@ struct l2cap_ops {
94791 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
94792 unsigned long hdr_len,
94793 unsigned long len, int nb);
94794-};
94795+} __do_const;
94796
94797 struct l2cap_conn {
94798 struct hci_conn *hcon;
94799diff --git a/include/net/bonding.h b/include/net/bonding.h
94800index fda6fee..dbdf83c 100644
94801--- a/include/net/bonding.h
94802+++ b/include/net/bonding.h
94803@@ -665,7 +665,7 @@ extern struct rtnl_link_ops bond_link_ops;
94804
94805 static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
94806 {
94807- atomic_long_inc(&dev->tx_dropped);
94808+ atomic_long_inc_unchecked(&dev->tx_dropped);
94809 dev_kfree_skb_any(skb);
94810 }
94811
94812diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
94813index f2ae33d..c457cf0 100644
94814--- a/include/net/caif/cfctrl.h
94815+++ b/include/net/caif/cfctrl.h
94816@@ -52,7 +52,7 @@ struct cfctrl_rsp {
94817 void (*radioset_rsp)(void);
94818 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
94819 struct cflayer *client_layer);
94820-};
94821+} __no_const;
94822
94823 /* Link Setup Parameters for CAIF-Links. */
94824 struct cfctrl_link_param {
94825@@ -101,8 +101,8 @@ struct cfctrl_request_info {
94826 struct cfctrl {
94827 struct cfsrvl serv;
94828 struct cfctrl_rsp res;
94829- atomic_t req_seq_no;
94830- atomic_t rsp_seq_no;
94831+ atomic_unchecked_t req_seq_no;
94832+ atomic_unchecked_t rsp_seq_no;
94833 struct list_head list;
94834 /* Protects from simultaneous access to first_req list */
94835 spinlock_t info_list_lock;
94836diff --git a/include/net/flow.h b/include/net/flow.h
94837index 8109a15..504466d 100644
94838--- a/include/net/flow.h
94839+++ b/include/net/flow.h
94840@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
94841
94842 void flow_cache_flush(struct net *net);
94843 void flow_cache_flush_deferred(struct net *net);
94844-extern atomic_t flow_cache_genid;
94845+extern atomic_unchecked_t flow_cache_genid;
94846
94847 #endif
94848diff --git a/include/net/genetlink.h b/include/net/genetlink.h
94849index 0574abd..0f16881 100644
94850--- a/include/net/genetlink.h
94851+++ b/include/net/genetlink.h
94852@@ -130,7 +130,7 @@ struct genl_ops {
94853 u8 cmd;
94854 u8 internal_flags;
94855 u8 flags;
94856-};
94857+} __do_const;
94858
94859 int __genl_register_family(struct genl_family *family);
94860
94861diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
94862index 0f712c0..cd762c4 100644
94863--- a/include/net/gro_cells.h
94864+++ b/include/net/gro_cells.h
94865@@ -27,7 +27,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
94866 cell = this_cpu_ptr(gcells->cells);
94867
94868 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
94869- atomic_long_inc(&dev->rx_dropped);
94870+ atomic_long_inc_unchecked(&dev->rx_dropped);
94871 kfree_skb(skb);
94872 return;
94873 }
94874diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
94875index 9fe865c..43735aa 100644
94876--- a/include/net/inet_connection_sock.h
94877+++ b/include/net/inet_connection_sock.h
94878@@ -63,7 +63,7 @@ struct inet_connection_sock_af_ops {
94879 int (*bind_conflict)(const struct sock *sk,
94880 const struct inet_bind_bucket *tb, bool relax);
94881 void (*mtu_reduced)(struct sock *sk);
94882-};
94883+} __do_const;
94884
94885 /** inet_connection_sock - INET connection oriented sock
94886 *
94887diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
94888index 80479ab..0c3f647 100644
94889--- a/include/net/inetpeer.h
94890+++ b/include/net/inetpeer.h
94891@@ -47,7 +47,7 @@ struct inet_peer {
94892 */
94893 union {
94894 struct {
94895- atomic_t rid; /* Frag reception counter */
94896+ atomic_unchecked_t rid; /* Frag reception counter */
94897 };
94898 struct rcu_head rcu;
94899 struct inet_peer *gc_next;
94900diff --git a/include/net/ip.h b/include/net/ip.h
94901index 6cc1eaf..14059b0 100644
94902--- a/include/net/ip.h
94903+++ b/include/net/ip.h
94904@@ -317,7 +317,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
94905 }
94906 }
94907
94908-u32 ip_idents_reserve(u32 hash, int segs);
94909+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
94910 void __ip_select_ident(struct iphdr *iph, int segs);
94911
94912 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
94913diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
94914index 5bd120e4..03fb812 100644
94915--- a/include/net/ip_fib.h
94916+++ b/include/net/ip_fib.h
94917@@ -170,7 +170,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
94918
94919 #define FIB_RES_SADDR(net, res) \
94920 ((FIB_RES_NH(res).nh_saddr_genid == \
94921- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
94922+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
94923 FIB_RES_NH(res).nh_saddr : \
94924 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
94925 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
94926diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
94927index 615b20b..fd4cbd8 100644
94928--- a/include/net/ip_vs.h
94929+++ b/include/net/ip_vs.h
94930@@ -534,7 +534,7 @@ struct ip_vs_conn {
94931 struct ip_vs_conn *control; /* Master control connection */
94932 atomic_t n_control; /* Number of controlled ones */
94933 struct ip_vs_dest *dest; /* real server */
94934- atomic_t in_pkts; /* incoming packet counter */
94935+ atomic_unchecked_t in_pkts; /* incoming packet counter */
94936
94937 /* Packet transmitter for different forwarding methods. If it
94938 * mangles the packet, it must return NF_DROP or better NF_STOLEN,
94939@@ -682,7 +682,7 @@ struct ip_vs_dest {
94940 __be16 port; /* port number of the server */
94941 union nf_inet_addr addr; /* IP address of the server */
94942 volatile unsigned int flags; /* dest status flags */
94943- atomic_t conn_flags; /* flags to copy to conn */
94944+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
94945 atomic_t weight; /* server weight */
94946
94947 atomic_t refcnt; /* reference counter */
94948@@ -928,11 +928,11 @@ struct netns_ipvs {
94949 /* ip_vs_lblc */
94950 int sysctl_lblc_expiration;
94951 struct ctl_table_header *lblc_ctl_header;
94952- struct ctl_table *lblc_ctl_table;
94953+ ctl_table_no_const *lblc_ctl_table;
94954 /* ip_vs_lblcr */
94955 int sysctl_lblcr_expiration;
94956 struct ctl_table_header *lblcr_ctl_header;
94957- struct ctl_table *lblcr_ctl_table;
94958+ ctl_table_no_const *lblcr_ctl_table;
94959 /* ip_vs_est */
94960 struct list_head est_list; /* estimator list */
94961 spinlock_t est_lock;
94962diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
94963index 8d4f588..2e37ad2 100644
94964--- a/include/net/irda/ircomm_tty.h
94965+++ b/include/net/irda/ircomm_tty.h
94966@@ -33,6 +33,7 @@
94967 #include <linux/termios.h>
94968 #include <linux/timer.h>
94969 #include <linux/tty.h> /* struct tty_struct */
94970+#include <asm/local.h>
94971
94972 #include <net/irda/irias_object.h>
94973 #include <net/irda/ircomm_core.h>
94974diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
94975index 714cc9a..ea05f3e 100644
94976--- a/include/net/iucv/af_iucv.h
94977+++ b/include/net/iucv/af_iucv.h
94978@@ -149,7 +149,7 @@ struct iucv_skb_cb {
94979 struct iucv_sock_list {
94980 struct hlist_head head;
94981 rwlock_t lock;
94982- atomic_t autobind_name;
94983+ atomic_unchecked_t autobind_name;
94984 };
94985
94986 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
94987diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
94988index f3be818..bf46196 100644
94989--- a/include/net/llc_c_ac.h
94990+++ b/include/net/llc_c_ac.h
94991@@ -87,7 +87,7 @@
94992 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
94993 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
94994
94995-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
94996+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
94997
94998 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
94999 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
95000diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
95001index 3948cf1..83b28c4 100644
95002--- a/include/net/llc_c_ev.h
95003+++ b/include/net/llc_c_ev.h
95004@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
95005 return (struct llc_conn_state_ev *)skb->cb;
95006 }
95007
95008-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
95009-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
95010+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
95011+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
95012
95013 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
95014 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
95015diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
95016index 48f3f89..0e92c50 100644
95017--- a/include/net/llc_c_st.h
95018+++ b/include/net/llc_c_st.h
95019@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
95020 u8 next_state;
95021 const llc_conn_ev_qfyr_t *ev_qualifiers;
95022 const llc_conn_action_t *ev_actions;
95023-};
95024+} __do_const;
95025
95026 struct llc_conn_state {
95027 u8 current_state;
95028diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
95029index a61b98c..aade1eb 100644
95030--- a/include/net/llc_s_ac.h
95031+++ b/include/net/llc_s_ac.h
95032@@ -23,7 +23,7 @@
95033 #define SAP_ACT_TEST_IND 9
95034
95035 /* All action functions must look like this */
95036-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
95037+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
95038
95039 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
95040 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
95041diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
95042index c4359e2..76dbc4a 100644
95043--- a/include/net/llc_s_st.h
95044+++ b/include/net/llc_s_st.h
95045@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
95046 llc_sap_ev_t ev;
95047 u8 next_state;
95048 const llc_sap_action_t *ev_actions;
95049-};
95050+} __do_const;
95051
95052 struct llc_sap_state {
95053 u8 curr_state;
95054diff --git a/include/net/mac80211.h b/include/net/mac80211.h
95055index d52914b..2b13cec 100644
95056--- a/include/net/mac80211.h
95057+++ b/include/net/mac80211.h
95058@@ -4915,7 +4915,7 @@ struct rate_control_ops {
95059 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
95060
95061 u32 (*get_expected_throughput)(void *priv_sta);
95062-};
95063+} __do_const;
95064
95065 static inline int rate_supported(struct ieee80211_sta *sta,
95066 enum ieee80211_band band,
95067diff --git a/include/net/neighbour.h b/include/net/neighbour.h
95068index 76f7084..8f36e39 100644
95069--- a/include/net/neighbour.h
95070+++ b/include/net/neighbour.h
95071@@ -163,7 +163,7 @@ struct neigh_ops {
95072 void (*error_report)(struct neighbour *, struct sk_buff *);
95073 int (*output)(struct neighbour *, struct sk_buff *);
95074 int (*connected_output)(struct neighbour *, struct sk_buff *);
95075-};
95076+} __do_const;
95077
95078 struct pneigh_entry {
95079 struct pneigh_entry *next;
95080@@ -217,7 +217,7 @@ struct neigh_table {
95081 struct neigh_statistics __percpu *stats;
95082 struct neigh_hash_table __rcu *nht;
95083 struct pneigh_entry **phash_buckets;
95084-};
95085+} __randomize_layout;
95086
95087 enum {
95088 NEIGH_ARP_TABLE = 0,
95089diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
95090index 36faf49..6927638 100644
95091--- a/include/net/net_namespace.h
95092+++ b/include/net/net_namespace.h
95093@@ -131,8 +131,8 @@ struct net {
95094 struct netns_ipvs *ipvs;
95095 #endif
95096 struct sock *diag_nlsk;
95097- atomic_t fnhe_genid;
95098-};
95099+ atomic_unchecked_t fnhe_genid;
95100+} __randomize_layout;
95101
95102 #include <linux/seq_file_net.h>
95103
95104@@ -288,7 +288,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
95105 #define __net_init __init
95106 #define __net_exit __exit_refok
95107 #define __net_initdata __initdata
95108+#ifdef CONSTIFY_PLUGIN
95109 #define __net_initconst __initconst
95110+#else
95111+#define __net_initconst __initdata
95112+#endif
95113 #endif
95114
95115 int peernet2id(struct net *net, struct net *peer);
95116@@ -301,7 +305,7 @@ struct pernet_operations {
95117 void (*exit_batch)(struct list_head *net_exit_list);
95118 int *id;
95119 size_t size;
95120-};
95121+} __do_const;
95122
95123 /*
95124 * Use these carefully. If you implement a network device and it
95125@@ -349,12 +353,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
95126
95127 static inline int rt_genid_ipv4(struct net *net)
95128 {
95129- return atomic_read(&net->ipv4.rt_genid);
95130+ return atomic_read_unchecked(&net->ipv4.rt_genid);
95131 }
95132
95133 static inline void rt_genid_bump_ipv4(struct net *net)
95134 {
95135- atomic_inc(&net->ipv4.rt_genid);
95136+ atomic_inc_unchecked(&net->ipv4.rt_genid);
95137 }
95138
95139 extern void (*__fib6_flush_trees)(struct net *net);
95140@@ -381,12 +385,12 @@ static inline void rt_genid_bump_all(struct net *net)
95141
95142 static inline int fnhe_genid(struct net *net)
95143 {
95144- return atomic_read(&net->fnhe_genid);
95145+ return atomic_read_unchecked(&net->fnhe_genid);
95146 }
95147
95148 static inline void fnhe_genid_bump(struct net *net)
95149 {
95150- atomic_inc(&net->fnhe_genid);
95151+ atomic_inc_unchecked(&net->fnhe_genid);
95152 }
95153
95154 #endif /* __NET_NET_NAMESPACE_H */
95155diff --git a/include/net/netlink.h b/include/net/netlink.h
95156index e010ee8..405b9f4 100644
95157--- a/include/net/netlink.h
95158+++ b/include/net/netlink.h
95159@@ -518,7 +518,7 @@ static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
95160 {
95161 if (mark) {
95162 WARN_ON((unsigned char *) mark < skb->data);
95163- skb_trim(skb, (unsigned char *) mark - skb->data);
95164+ skb_trim(skb, (const unsigned char *) mark - skb->data);
95165 }
95166 }
95167
95168diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
95169index 29d6a94..235d3d84 100644
95170--- a/include/net/netns/conntrack.h
95171+++ b/include/net/netns/conntrack.h
95172@@ -14,10 +14,10 @@ struct nf_conntrack_ecache;
95173 struct nf_proto_net {
95174 #ifdef CONFIG_SYSCTL
95175 struct ctl_table_header *ctl_table_header;
95176- struct ctl_table *ctl_table;
95177+ ctl_table_no_const *ctl_table;
95178 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
95179 struct ctl_table_header *ctl_compat_header;
95180- struct ctl_table *ctl_compat_table;
95181+ ctl_table_no_const *ctl_compat_table;
95182 #endif
95183 #endif
95184 unsigned int users;
95185@@ -60,7 +60,7 @@ struct nf_ip_net {
95186 struct nf_icmp_net icmpv6;
95187 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
95188 struct ctl_table_header *ctl_table_header;
95189- struct ctl_table *ctl_table;
95190+ ctl_table_no_const *ctl_table;
95191 #endif
95192 };
95193
95194diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
95195index dbe2254..ed0c151 100644
95196--- a/include/net/netns/ipv4.h
95197+++ b/include/net/netns/ipv4.h
95198@@ -87,7 +87,7 @@ struct netns_ipv4 {
95199
95200 struct ping_group_range ping_group_range;
95201
95202- atomic_t dev_addr_genid;
95203+ atomic_unchecked_t dev_addr_genid;
95204
95205 #ifdef CONFIG_SYSCTL
95206 unsigned long *sysctl_local_reserved_ports;
95207@@ -101,6 +101,6 @@ struct netns_ipv4 {
95208 struct fib_rules_ops *mr_rules_ops;
95209 #endif
95210 #endif
95211- atomic_t rt_genid;
95212+ atomic_unchecked_t rt_genid;
95213 };
95214 #endif
95215diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
95216index 69ae41f..4f94868 100644
95217--- a/include/net/netns/ipv6.h
95218+++ b/include/net/netns/ipv6.h
95219@@ -75,8 +75,8 @@ struct netns_ipv6 {
95220 struct fib_rules_ops *mr6_rules_ops;
95221 #endif
95222 #endif
95223- atomic_t dev_addr_genid;
95224- atomic_t fib6_sernum;
95225+ atomic_unchecked_t dev_addr_genid;
95226+ atomic_unchecked_t fib6_sernum;
95227 };
95228
95229 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
95230diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
95231index 730d82a..045f2c4 100644
95232--- a/include/net/netns/xfrm.h
95233+++ b/include/net/netns/xfrm.h
95234@@ -78,7 +78,7 @@ struct netns_xfrm {
95235
95236 /* flow cache part */
95237 struct flow_cache flow_cache_global;
95238- atomic_t flow_cache_genid;
95239+ atomic_unchecked_t flow_cache_genid;
95240 struct list_head flow_cache_gc_list;
95241 spinlock_t flow_cache_gc_lock;
95242 struct work_struct flow_cache_gc_work;
95243diff --git a/include/net/ping.h b/include/net/ping.h
95244index cc16d41..664f40b 100644
95245--- a/include/net/ping.h
95246+++ b/include/net/ping.h
95247@@ -54,7 +54,7 @@ struct ping_iter_state {
95248
95249 extern struct proto ping_prot;
95250 #if IS_ENABLED(CONFIG_IPV6)
95251-extern struct pingv6_ops pingv6_ops;
95252+extern struct pingv6_ops *pingv6_ops;
95253 #endif
95254
95255 struct pingfakehdr {
95256diff --git a/include/net/protocol.h b/include/net/protocol.h
95257index d6fcc1f..ca277058 100644
95258--- a/include/net/protocol.h
95259+++ b/include/net/protocol.h
95260@@ -49,7 +49,7 @@ struct net_protocol {
95261 * socket lookup?
95262 */
95263 icmp_strict_tag_validation:1;
95264-};
95265+} __do_const;
95266
95267 #if IS_ENABLED(CONFIG_IPV6)
95268 struct inet6_protocol {
95269@@ -62,7 +62,7 @@ struct inet6_protocol {
95270 u8 type, u8 code, int offset,
95271 __be32 info);
95272 unsigned int flags; /* INET6_PROTO_xxx */
95273-};
95274+} __do_const;
95275
95276 #define INET6_PROTO_NOPOLICY 0x1
95277 #define INET6_PROTO_FINAL 0x2
95278diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
95279index 6c6d539..af70817 100644
95280--- a/include/net/rtnetlink.h
95281+++ b/include/net/rtnetlink.h
95282@@ -95,7 +95,7 @@ struct rtnl_link_ops {
95283 const struct net_device *dev,
95284 const struct net_device *slave_dev);
95285 struct net *(*get_link_net)(const struct net_device *dev);
95286-};
95287+} __do_const;
95288
95289 int __rtnl_link_register(struct rtnl_link_ops *ops);
95290 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
95291diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
95292index 4a5b9a3..ca27d73 100644
95293--- a/include/net/sctp/checksum.h
95294+++ b/include/net/sctp/checksum.h
95295@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
95296 unsigned int offset)
95297 {
95298 struct sctphdr *sh = sctp_hdr(skb);
95299- __le32 ret, old = sh->checksum;
95300- const struct skb_checksum_ops ops = {
95301+ __le32 ret, old = sh->checksum;
95302+ static const struct skb_checksum_ops ops = {
95303 .update = sctp_csum_update,
95304 .combine = sctp_csum_combine,
95305 };
95306diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
95307index 487ef34..d457f98 100644
95308--- a/include/net/sctp/sm.h
95309+++ b/include/net/sctp/sm.h
95310@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
95311 typedef struct {
95312 sctp_state_fn_t *fn;
95313 const char *name;
95314-} sctp_sm_table_entry_t;
95315+} __do_const sctp_sm_table_entry_t;
95316
95317 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
95318 * currently in use.
95319@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
95320 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
95321
95322 /* Extern declarations for major data structures. */
95323-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
95324+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
95325
95326
95327 /* Get the size of a DATA chunk payload. */
95328diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
95329index 495c87e..5b327ff 100644
95330--- a/include/net/sctp/structs.h
95331+++ b/include/net/sctp/structs.h
95332@@ -513,7 +513,7 @@ struct sctp_pf {
95333 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
95334 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
95335 struct sctp_af *af;
95336-};
95337+} __do_const;
95338
95339
95340 /* Structure to track chunk fragments that have been acked, but peer
95341diff --git a/include/net/sock.h b/include/net/sock.h
95342index e4079c2..79c5d3a 100644
95343--- a/include/net/sock.h
95344+++ b/include/net/sock.h
95345@@ -362,7 +362,7 @@ struct sock {
95346 unsigned int sk_napi_id;
95347 unsigned int sk_ll_usec;
95348 #endif
95349- atomic_t sk_drops;
95350+ atomic_unchecked_t sk_drops;
95351 int sk_rcvbuf;
95352
95353 struct sk_filter __rcu *sk_filter;
95354@@ -1039,7 +1039,7 @@ struct proto {
95355 void (*destroy_cgroup)(struct mem_cgroup *memcg);
95356 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
95357 #endif
95358-};
95359+} __randomize_layout;
95360
95361 /*
95362 * Bits in struct cg_proto.flags
95363@@ -1212,7 +1212,7 @@ static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
95364 page_counter_uncharge(&prot->memory_allocated, amt);
95365 }
95366
95367-static inline long
95368+static inline long __intentional_overflow(-1)
95369 sk_memory_allocated(const struct sock *sk)
95370 {
95371 struct proto *prot = sk->sk_prot;
95372@@ -1778,7 +1778,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
95373 }
95374
95375 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
95376- struct iov_iter *from, char *to,
95377+ struct iov_iter *from, unsigned char *to,
95378 int copy, int offset)
95379 {
95380 if (skb->ip_summed == CHECKSUM_NONE) {
95381@@ -2025,7 +2025,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
95382 }
95383 }
95384
95385-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
95386+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
95387
95388 /**
95389 * sk_page_frag - return an appropriate page_frag
95390diff --git a/include/net/tcp.h b/include/net/tcp.h
95391index 8d6b983..5813205 100644
95392--- a/include/net/tcp.h
95393+++ b/include/net/tcp.h
95394@@ -516,7 +516,7 @@ void tcp_retransmit_timer(struct sock *sk);
95395 void tcp_xmit_retransmit_queue(struct sock *);
95396 void tcp_simple_retransmit(struct sock *);
95397 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
95398-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
95399+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
95400
95401 void tcp_send_probe0(struct sock *);
95402 void tcp_send_partial(struct sock *);
95403@@ -694,8 +694,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
95404 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
95405 */
95406 struct tcp_skb_cb {
95407- __u32 seq; /* Starting sequence number */
95408- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
95409+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
95410+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
95411 union {
95412 /* Note : tcp_tw_isn is used in input path only
95413 * (isn chosen by tcp_timewait_state_process())
95414@@ -720,7 +720,7 @@ struct tcp_skb_cb {
95415
95416 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
95417 /* 1 byte hole */
95418- __u32 ack_seq; /* Sequence number ACK'd */
95419+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
95420 union {
95421 struct inet_skb_parm h4;
95422 #if IS_ENABLED(CONFIG_IPV6)
95423diff --git a/include/net/xfrm.h b/include/net/xfrm.h
95424index dc4865e..152ee4c 100644
95425--- a/include/net/xfrm.h
95426+++ b/include/net/xfrm.h
95427@@ -285,7 +285,6 @@ struct xfrm_dst;
95428 struct xfrm_policy_afinfo {
95429 unsigned short family;
95430 struct dst_ops *dst_ops;
95431- void (*garbage_collect)(struct net *net);
95432 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
95433 const xfrm_address_t *saddr,
95434 const xfrm_address_t *daddr);
95435@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
95436 struct net_device *dev,
95437 const struct flowi *fl);
95438 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
95439-};
95440+} __do_const;
95441
95442 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
95443 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
95444@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
95445 int (*transport_finish)(struct sk_buff *skb,
95446 int async);
95447 void (*local_error)(struct sk_buff *skb, u32 mtu);
95448-};
95449+} __do_const;
95450
95451 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
95452 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
95453@@ -437,7 +436,7 @@ struct xfrm_mode {
95454 struct module *owner;
95455 unsigned int encap;
95456 int flags;
95457-};
95458+} __do_const;
95459
95460 /* Flags for xfrm_mode. */
95461 enum {
95462@@ -534,7 +533,7 @@ struct xfrm_policy {
95463 struct timer_list timer;
95464
95465 struct flow_cache_object flo;
95466- atomic_t genid;
95467+ atomic_unchecked_t genid;
95468 u32 priority;
95469 u32 index;
95470 struct xfrm_mark mark;
95471@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
95472 }
95473
95474 void xfrm_garbage_collect(struct net *net);
95475+void xfrm_garbage_collect_deferred(struct net *net);
95476
95477 #else
95478
95479@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
95480 static inline void xfrm_garbage_collect(struct net *net)
95481 {
95482 }
95483+static inline void xfrm_garbage_collect_deferred(struct net *net)
95484+{
95485+}
95486 #endif
95487
95488 static __inline__
95489diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
95490index 1017e0b..227aa4d 100644
95491--- a/include/rdma/iw_cm.h
95492+++ b/include/rdma/iw_cm.h
95493@@ -122,7 +122,7 @@ struct iw_cm_verbs {
95494 int backlog);
95495
95496 int (*destroy_listen)(struct iw_cm_id *cm_id);
95497-};
95498+} __no_const;
95499
95500 /**
95501 * iw_create_cm_id - Create an IW CM identifier.
95502diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
95503index 93d14da..734b3d8 100644
95504--- a/include/scsi/libfc.h
95505+++ b/include/scsi/libfc.h
95506@@ -771,6 +771,7 @@ struct libfc_function_template {
95507 */
95508 void (*disc_stop_final) (struct fc_lport *);
95509 };
95510+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
95511
95512 /**
95513 * struct fc_disc - Discovery context
95514@@ -875,7 +876,7 @@ struct fc_lport {
95515 struct fc_vport *vport;
95516
95517 /* Operational Information */
95518- struct libfc_function_template tt;
95519+ libfc_function_template_no_const tt;
95520 u8 link_up;
95521 u8 qfull;
95522 enum fc_lport_state state;
95523diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
95524index a4c9336..d6f8f34 100644
95525--- a/include/scsi/scsi_device.h
95526+++ b/include/scsi/scsi_device.h
95527@@ -185,9 +185,9 @@ struct scsi_device {
95528 unsigned int max_device_blocked; /* what device_blocked counts down from */
95529 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
95530
95531- atomic_t iorequest_cnt;
95532- atomic_t iodone_cnt;
95533- atomic_t ioerr_cnt;
95534+ atomic_unchecked_t iorequest_cnt;
95535+ atomic_unchecked_t iodone_cnt;
95536+ atomic_unchecked_t ioerr_cnt;
95537
95538 struct device sdev_gendev,
95539 sdev_dev;
95540diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
95541index 891a658..fcd68df 100644
95542--- a/include/scsi/scsi_driver.h
95543+++ b/include/scsi/scsi_driver.h
95544@@ -14,7 +14,7 @@ struct scsi_driver {
95545 void (*rescan)(struct device *);
95546 int (*init_command)(struct scsi_cmnd *);
95547 void (*uninit_command)(struct scsi_cmnd *);
95548- int (*done)(struct scsi_cmnd *);
95549+ unsigned int (*done)(struct scsi_cmnd *);
95550 int (*eh_action)(struct scsi_cmnd *, int);
95551 };
95552 #define to_scsi_driver(drv) \
95553diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
95554index 007a0bc..7188db8 100644
95555--- a/include/scsi/scsi_transport_fc.h
95556+++ b/include/scsi/scsi_transport_fc.h
95557@@ -756,7 +756,8 @@ struct fc_function_template {
95558 unsigned long show_host_system_hostname:1;
95559
95560 unsigned long disable_target_scan:1;
95561-};
95562+} __do_const;
95563+typedef struct fc_function_template __no_const fc_function_template_no_const;
95564
95565
95566 /**
95567diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
95568index f48089d..73abe48 100644
95569--- a/include/sound/compress_driver.h
95570+++ b/include/sound/compress_driver.h
95571@@ -130,7 +130,7 @@ struct snd_compr_ops {
95572 struct snd_compr_caps *caps);
95573 int (*get_codec_caps) (struct snd_compr_stream *stream,
95574 struct snd_compr_codec_caps *codec);
95575-};
95576+} __no_const;
95577
95578 /**
95579 * struct snd_compr: Compressed device
95580diff --git a/include/sound/soc.h b/include/sound/soc.h
95581index 0d1ade1..34e77d3 100644
95582--- a/include/sound/soc.h
95583+++ b/include/sound/soc.h
95584@@ -856,7 +856,7 @@ struct snd_soc_codec_driver {
95585 enum snd_soc_dapm_type, int);
95586
95587 bool ignore_pmdown_time; /* Doesn't benefit from pmdown delay */
95588-};
95589+} __do_const;
95590
95591 /* SoC platform interface */
95592 struct snd_soc_platform_driver {
95593@@ -883,7 +883,7 @@ struct snd_soc_platform_driver {
95594 const struct snd_compr_ops *compr_ops;
95595
95596 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
95597-};
95598+} __do_const;
95599
95600 struct snd_soc_dai_link_component {
95601 const char *name;
95602diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
95603index 985ca4c..b55b54a 100644
95604--- a/include/target/target_core_base.h
95605+++ b/include/target/target_core_base.h
95606@@ -767,7 +767,7 @@ struct se_device {
95607 atomic_long_t write_bytes;
95608 /* Active commands on this virtual SE device */
95609 atomic_t simple_cmds;
95610- atomic_t dev_ordered_id;
95611+ atomic_unchecked_t dev_ordered_id;
95612 atomic_t dev_ordered_sync;
95613 atomic_t dev_qf_count;
95614 int export_count;
95615diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
95616new file mode 100644
95617index 0000000..fb634b7
95618--- /dev/null
95619+++ b/include/trace/events/fs.h
95620@@ -0,0 +1,53 @@
95621+#undef TRACE_SYSTEM
95622+#define TRACE_SYSTEM fs
95623+
95624+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
95625+#define _TRACE_FS_H
95626+
95627+#include <linux/fs.h>
95628+#include <linux/tracepoint.h>
95629+
95630+TRACE_EVENT(do_sys_open,
95631+
95632+ TP_PROTO(const char *filename, int flags, int mode),
95633+
95634+ TP_ARGS(filename, flags, mode),
95635+
95636+ TP_STRUCT__entry(
95637+ __string( filename, filename )
95638+ __field( int, flags )
95639+ __field( int, mode )
95640+ ),
95641+
95642+ TP_fast_assign(
95643+ __assign_str(filename, filename);
95644+ __entry->flags = flags;
95645+ __entry->mode = mode;
95646+ ),
95647+
95648+ TP_printk("\"%s\" %x %o",
95649+ __get_str(filename), __entry->flags, __entry->mode)
95650+);
95651+
95652+TRACE_EVENT(open_exec,
95653+
95654+ TP_PROTO(const char *filename),
95655+
95656+ TP_ARGS(filename),
95657+
95658+ TP_STRUCT__entry(
95659+ __string( filename, filename )
95660+ ),
95661+
95662+ TP_fast_assign(
95663+ __assign_str(filename, filename);
95664+ ),
95665+
95666+ TP_printk("\"%s\"",
95667+ __get_str(filename))
95668+);
95669+
95670+#endif /* _TRACE_FS_H */
95671+
95672+/* This part must be outside protection */
95673+#include <trace/define_trace.h>
95674diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
95675index 3608beb..df39d8a 100644
95676--- a/include/trace/events/irq.h
95677+++ b/include/trace/events/irq.h
95678@@ -36,7 +36,7 @@ struct softirq_action;
95679 */
95680 TRACE_EVENT(irq_handler_entry,
95681
95682- TP_PROTO(int irq, struct irqaction *action),
95683+ TP_PROTO(int irq, const struct irqaction *action),
95684
95685 TP_ARGS(irq, action),
95686
95687@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
95688 */
95689 TRACE_EVENT(irq_handler_exit,
95690
95691- TP_PROTO(int irq, struct irqaction *action, int ret),
95692+ TP_PROTO(int irq, const struct irqaction *action, int ret),
95693
95694 TP_ARGS(irq, action, ret),
95695
95696diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
95697index 6eed16b..3e05750 100644
95698--- a/include/uapi/drm/i915_drm.h
95699+++ b/include/uapi/drm/i915_drm.h
95700@@ -347,6 +347,7 @@ typedef struct drm_i915_irq_wait {
95701 #define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
95702 #define I915_PARAM_MMAP_VERSION 30
95703 #define I915_PARAM_HAS_BSD2 31
95704+#define I915_PARAM_HAS_LEGACY_CONTEXT 35
95705
95706 typedef struct drm_i915_getparam {
95707 int param;
95708diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
95709index 7caf44c..23c6f27 100644
95710--- a/include/uapi/linux/a.out.h
95711+++ b/include/uapi/linux/a.out.h
95712@@ -39,6 +39,14 @@ enum machine_type {
95713 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
95714 };
95715
95716+/* Constants for the N_FLAGS field */
95717+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
95718+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
95719+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
95720+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
95721+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
95722+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
95723+
95724 #if !defined (N_MAGIC)
95725 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
95726 #endif
95727diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
95728index 22b6ad3..aeba37e 100644
95729--- a/include/uapi/linux/bcache.h
95730+++ b/include/uapi/linux/bcache.h
95731@@ -5,6 +5,7 @@
95732 * Bcache on disk data structures
95733 */
95734
95735+#include <linux/compiler.h>
95736 #include <asm/types.h>
95737
95738 #define BITMASK(name, type, field, offset, size) \
95739@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
95740 /* Btree keys - all units are in sectors */
95741
95742 struct bkey {
95743- __u64 high;
95744- __u64 low;
95745+ __u64 high __intentional_overflow(-1);
95746+ __u64 low __intentional_overflow(-1);
95747 __u64 ptr[];
95748 };
95749
95750diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
95751index d876736..ccce5c0 100644
95752--- a/include/uapi/linux/byteorder/little_endian.h
95753+++ b/include/uapi/linux/byteorder/little_endian.h
95754@@ -42,51 +42,51 @@
95755
95756 static inline __le64 __cpu_to_le64p(const __u64 *p)
95757 {
95758- return (__force __le64)*p;
95759+ return (__force const __le64)*p;
95760 }
95761-static inline __u64 __le64_to_cpup(const __le64 *p)
95762+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
95763 {
95764- return (__force __u64)*p;
95765+ return (__force const __u64)*p;
95766 }
95767 static inline __le32 __cpu_to_le32p(const __u32 *p)
95768 {
95769- return (__force __le32)*p;
95770+ return (__force const __le32)*p;
95771 }
95772 static inline __u32 __le32_to_cpup(const __le32 *p)
95773 {
95774- return (__force __u32)*p;
95775+ return (__force const __u32)*p;
95776 }
95777 static inline __le16 __cpu_to_le16p(const __u16 *p)
95778 {
95779- return (__force __le16)*p;
95780+ return (__force const __le16)*p;
95781 }
95782 static inline __u16 __le16_to_cpup(const __le16 *p)
95783 {
95784- return (__force __u16)*p;
95785+ return (__force const __u16)*p;
95786 }
95787 static inline __be64 __cpu_to_be64p(const __u64 *p)
95788 {
95789- return (__force __be64)__swab64p(p);
95790+ return (__force const __be64)__swab64p(p);
95791 }
95792 static inline __u64 __be64_to_cpup(const __be64 *p)
95793 {
95794- return __swab64p((__u64 *)p);
95795+ return __swab64p((const __u64 *)p);
95796 }
95797 static inline __be32 __cpu_to_be32p(const __u32 *p)
95798 {
95799- return (__force __be32)__swab32p(p);
95800+ return (__force const __be32)__swab32p(p);
95801 }
95802-static inline __u32 __be32_to_cpup(const __be32 *p)
95803+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
95804 {
95805- return __swab32p((__u32 *)p);
95806+ return __swab32p((const __u32 *)p);
95807 }
95808 static inline __be16 __cpu_to_be16p(const __u16 *p)
95809 {
95810- return (__force __be16)__swab16p(p);
95811+ return (__force const __be16)__swab16p(p);
95812 }
95813 static inline __u16 __be16_to_cpup(const __be16 *p)
95814 {
95815- return __swab16p((__u16 *)p);
95816+ return __swab16p((const __u16 *)p);
95817 }
95818 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
95819 #define __le64_to_cpus(x) do { (void)(x); } while (0)
95820diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
95821index 71e1d0e..6cc9caf 100644
95822--- a/include/uapi/linux/elf.h
95823+++ b/include/uapi/linux/elf.h
95824@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
95825 #define PT_GNU_EH_FRAME 0x6474e550
95826
95827 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
95828+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
95829+
95830+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
95831+
95832+/* Constants for the e_flags field */
95833+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
95834+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
95835+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
95836+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
95837+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
95838+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
95839
95840 /*
95841 * Extended Numbering
95842@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
95843 #define DT_DEBUG 21
95844 #define DT_TEXTREL 22
95845 #define DT_JMPREL 23
95846+#define DT_FLAGS 30
95847+ #define DF_TEXTREL 0x00000004
95848 #define DT_ENCODING 32
95849 #define OLD_DT_LOOS 0x60000000
95850 #define DT_LOOS 0x6000000d
95851@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
95852 #define PF_W 0x2
95853 #define PF_X 0x1
95854
95855+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
95856+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
95857+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
95858+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
95859+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
95860+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
95861+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
95862+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
95863+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
95864+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
95865+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
95866+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
95867+
95868 typedef struct elf32_phdr{
95869 Elf32_Word p_type;
95870 Elf32_Off p_offset;
95871@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
95872 #define EI_OSABI 7
95873 #define EI_PAD 8
95874
95875+#define EI_PAX 14
95876+
95877 #define ELFMAG0 0x7f /* EI_MAG */
95878 #define ELFMAG1 'E'
95879 #define ELFMAG2 'L'
95880diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
95881index aa169c4..6a2771d 100644
95882--- a/include/uapi/linux/personality.h
95883+++ b/include/uapi/linux/personality.h
95884@@ -30,6 +30,7 @@ enum {
95885 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
95886 ADDR_NO_RANDOMIZE | \
95887 ADDR_COMPAT_LAYOUT | \
95888+ ADDR_LIMIT_3GB | \
95889 MMAP_PAGE_ZERO)
95890
95891 /*
95892diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
95893index 7530e74..e714828 100644
95894--- a/include/uapi/linux/screen_info.h
95895+++ b/include/uapi/linux/screen_info.h
95896@@ -43,7 +43,8 @@ struct screen_info {
95897 __u16 pages; /* 0x32 */
95898 __u16 vesa_attributes; /* 0x34 */
95899 __u32 capabilities; /* 0x36 */
95900- __u8 _reserved[6]; /* 0x3a */
95901+ __u16 vesapm_size; /* 0x3a */
95902+ __u8 _reserved[4]; /* 0x3c */
95903 } __attribute__((packed));
95904
95905 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
95906diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
95907index 0e011eb..82681b1 100644
95908--- a/include/uapi/linux/swab.h
95909+++ b/include/uapi/linux/swab.h
95910@@ -43,7 +43,7 @@
95911 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
95912 */
95913
95914-static inline __attribute_const__ __u16 __fswab16(__u16 val)
95915+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
95916 {
95917 #ifdef __HAVE_BUILTIN_BSWAP16__
95918 return __builtin_bswap16(val);
95919@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
95920 #endif
95921 }
95922
95923-static inline __attribute_const__ __u32 __fswab32(__u32 val)
95924+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
95925 {
95926 #ifdef __HAVE_BUILTIN_BSWAP32__
95927 return __builtin_bswap32(val);
95928@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
95929 #endif
95930 }
95931
95932-static inline __attribute_const__ __u64 __fswab64(__u64 val)
95933+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
95934 {
95935 #ifdef __HAVE_BUILTIN_BSWAP64__
95936 return __builtin_bswap64(val);
95937diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
95938index 1590c49..5eab462 100644
95939--- a/include/uapi/linux/xattr.h
95940+++ b/include/uapi/linux/xattr.h
95941@@ -73,5 +73,9 @@
95942 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
95943 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
95944
95945+/* User namespace */
95946+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
95947+#define XATTR_PAX_FLAGS_SUFFIX "flags"
95948+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
95949
95950 #endif /* _UAPI_LINUX_XATTR_H */
95951diff --git a/include/video/udlfb.h b/include/video/udlfb.h
95952index f9466fa..f4e2b81 100644
95953--- a/include/video/udlfb.h
95954+++ b/include/video/udlfb.h
95955@@ -53,10 +53,10 @@ struct dlfb_data {
95956 u32 pseudo_palette[256];
95957 int blank_mode; /*one of FB_BLANK_ */
95958 /* blit-only rendering path metrics, exposed through sysfs */
95959- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
95960- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
95961- atomic_t bytes_sent; /* to usb, after compression including overhead */
95962- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
95963+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
95964+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
95965+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
95966+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
95967 };
95968
95969 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
95970diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
95971index 30f5362..8ed8ac9 100644
95972--- a/include/video/uvesafb.h
95973+++ b/include/video/uvesafb.h
95974@@ -122,6 +122,7 @@ struct uvesafb_par {
95975 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
95976 u8 pmi_setpal; /* PMI for palette changes */
95977 u16 *pmi_base; /* protected mode interface location */
95978+ u8 *pmi_code; /* protected mode code location */
95979 void *pmi_start;
95980 void *pmi_pal;
95981 u8 *vbe_state_orig; /*
95982diff --git a/init/Kconfig b/init/Kconfig
95983index f5dbc6d..8259396 100644
95984--- a/init/Kconfig
95985+++ b/init/Kconfig
95986@@ -1136,6 +1136,7 @@ endif # CGROUPS
95987
95988 config CHECKPOINT_RESTORE
95989 bool "Checkpoint/restore support" if EXPERT
95990+ depends on !GRKERNSEC
95991 default n
95992 help
95993 Enables additional kernel features in a sake of checkpoint/restore.
95994@@ -1646,7 +1647,7 @@ config SLUB_DEBUG
95995
95996 config COMPAT_BRK
95997 bool "Disable heap randomization"
95998- default y
95999+ default n
96000 help
96001 Randomizing heap placement makes heap exploits harder, but it
96002 also breaks ancient binaries (including anything libc5 based).
96003@@ -1977,7 +1978,7 @@ config INIT_ALL_POSSIBLE
96004 config STOP_MACHINE
96005 bool
96006 default y
96007- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
96008+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
96009 help
96010 Need stop_machine() primitive.
96011
96012diff --git a/init/Makefile b/init/Makefile
96013index 7bc47ee..6da2dc7 100644
96014--- a/init/Makefile
96015+++ b/init/Makefile
96016@@ -2,6 +2,9 @@
96017 # Makefile for the linux kernel.
96018 #
96019
96020+ccflags-y := $(GCC_PLUGINS_CFLAGS)
96021+asflags-y := $(GCC_PLUGINS_AFLAGS)
96022+
96023 obj-y := main.o version.o mounts.o
96024 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
96025 obj-y += noinitramfs.o
96026diff --git a/init/do_mounts.c b/init/do_mounts.c
96027index eb41008..f5dbbf9 100644
96028--- a/init/do_mounts.c
96029+++ b/init/do_mounts.c
96030@@ -360,11 +360,11 @@ static void __init get_fs_names(char *page)
96031 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
96032 {
96033 struct super_block *s;
96034- int err = sys_mount(name, "/root", fs, flags, data);
96035+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
96036 if (err)
96037 return err;
96038
96039- sys_chdir("/root");
96040+ sys_chdir((const char __force_user *)"/root");
96041 s = current->fs->pwd.dentry->d_sb;
96042 ROOT_DEV = s->s_dev;
96043 printk(KERN_INFO
96044@@ -487,18 +487,18 @@ void __init change_floppy(char *fmt, ...)
96045 va_start(args, fmt);
96046 vsprintf(buf, fmt, args);
96047 va_end(args);
96048- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
96049+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
96050 if (fd >= 0) {
96051 sys_ioctl(fd, FDEJECT, 0);
96052 sys_close(fd);
96053 }
96054 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
96055- fd = sys_open("/dev/console", O_RDWR, 0);
96056+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
96057 if (fd >= 0) {
96058 sys_ioctl(fd, TCGETS, (long)&termios);
96059 termios.c_lflag &= ~ICANON;
96060 sys_ioctl(fd, TCSETSF, (long)&termios);
96061- sys_read(fd, &c, 1);
96062+ sys_read(fd, (char __user *)&c, 1);
96063 termios.c_lflag |= ICANON;
96064 sys_ioctl(fd, TCSETSF, (long)&termios);
96065 sys_close(fd);
96066@@ -592,8 +592,8 @@ void __init prepare_namespace(void)
96067 mount_root();
96068 out:
96069 devtmpfs_mount("dev");
96070- sys_mount(".", "/", NULL, MS_MOVE, NULL);
96071- sys_chroot(".");
96072+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
96073+ sys_chroot((const char __force_user *)".");
96074 }
96075
96076 static bool is_tmpfs;
96077diff --git a/init/do_mounts.h b/init/do_mounts.h
96078index f5b978a..69dbfe8 100644
96079--- a/init/do_mounts.h
96080+++ b/init/do_mounts.h
96081@@ -15,15 +15,15 @@ extern int root_mountflags;
96082
96083 static inline int create_dev(char *name, dev_t dev)
96084 {
96085- sys_unlink(name);
96086- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
96087+ sys_unlink((char __force_user *)name);
96088+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
96089 }
96090
96091 #if BITS_PER_LONG == 32
96092 static inline u32 bstat(char *name)
96093 {
96094 struct stat64 stat;
96095- if (sys_stat64(name, &stat) != 0)
96096+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
96097 return 0;
96098 if (!S_ISBLK(stat.st_mode))
96099 return 0;
96100@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
96101 static inline u32 bstat(char *name)
96102 {
96103 struct stat stat;
96104- if (sys_newstat(name, &stat) != 0)
96105+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
96106 return 0;
96107 if (!S_ISBLK(stat.st_mode))
96108 return 0;
96109diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
96110index 3e0878e..8a9d7a0 100644
96111--- a/init/do_mounts_initrd.c
96112+++ b/init/do_mounts_initrd.c
96113@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
96114 {
96115 sys_unshare(CLONE_FS | CLONE_FILES);
96116 /* stdin/stdout/stderr for /linuxrc */
96117- sys_open("/dev/console", O_RDWR, 0);
96118+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
96119 sys_dup(0);
96120 sys_dup(0);
96121 /* move initrd over / and chdir/chroot in initrd root */
96122- sys_chdir("/root");
96123- sys_mount(".", "/", NULL, MS_MOVE, NULL);
96124- sys_chroot(".");
96125+ sys_chdir((const char __force_user *)"/root");
96126+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
96127+ sys_chroot((const char __force_user *)".");
96128 sys_setsid();
96129 return 0;
96130 }
96131@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
96132 create_dev("/dev/root.old", Root_RAM0);
96133 /* mount initrd on rootfs' /root */
96134 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
96135- sys_mkdir("/old", 0700);
96136- sys_chdir("/old");
96137+ sys_mkdir((const char __force_user *)"/old", 0700);
96138+ sys_chdir((const char __force_user *)"/old");
96139
96140 /* try loading default modules from initrd */
96141 load_default_modules();
96142@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
96143 current->flags &= ~PF_FREEZER_SKIP;
96144
96145 /* move initrd to rootfs' /old */
96146- sys_mount("..", ".", NULL, MS_MOVE, NULL);
96147+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
96148 /* switch root and cwd back to / of rootfs */
96149- sys_chroot("..");
96150+ sys_chroot((const char __force_user *)"..");
96151
96152 if (new_decode_dev(real_root_dev) == Root_RAM0) {
96153- sys_chdir("/old");
96154+ sys_chdir((const char __force_user *)"/old");
96155 return;
96156 }
96157
96158- sys_chdir("/");
96159+ sys_chdir((const char __force_user *)"/");
96160 ROOT_DEV = new_decode_dev(real_root_dev);
96161 mount_root();
96162
96163 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
96164- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
96165+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
96166 if (!error)
96167 printk("okay\n");
96168 else {
96169- int fd = sys_open("/dev/root.old", O_RDWR, 0);
96170+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
96171 if (error == -ENOENT)
96172 printk("/initrd does not exist. Ignored.\n");
96173 else
96174 printk("failed\n");
96175 printk(KERN_NOTICE "Unmounting old root\n");
96176- sys_umount("/old", MNT_DETACH);
96177+ sys_umount((char __force_user *)"/old", MNT_DETACH);
96178 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
96179 if (fd < 0) {
96180 error = fd;
96181@@ -127,11 +127,11 @@ int __init initrd_load(void)
96182 * mounted in the normal path.
96183 */
96184 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
96185- sys_unlink("/initrd.image");
96186+ sys_unlink((const char __force_user *)"/initrd.image");
96187 handle_initrd();
96188 return 1;
96189 }
96190 }
96191- sys_unlink("/initrd.image");
96192+ sys_unlink((const char __force_user *)"/initrd.image");
96193 return 0;
96194 }
96195diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
96196index 8cb6db5..d729f50 100644
96197--- a/init/do_mounts_md.c
96198+++ b/init/do_mounts_md.c
96199@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
96200 partitioned ? "_d" : "", minor,
96201 md_setup_args[ent].device_names);
96202
96203- fd = sys_open(name, 0, 0);
96204+ fd = sys_open((char __force_user *)name, 0, 0);
96205 if (fd < 0) {
96206 printk(KERN_ERR "md: open failed - cannot start "
96207 "array %s\n", name);
96208@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
96209 * array without it
96210 */
96211 sys_close(fd);
96212- fd = sys_open(name, 0, 0);
96213+ fd = sys_open((char __force_user *)name, 0, 0);
96214 sys_ioctl(fd, BLKRRPART, 0);
96215 }
96216 sys_close(fd);
96217@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
96218
96219 wait_for_device_probe();
96220
96221- fd = sys_open("/dev/md0", 0, 0);
96222+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
96223 if (fd >= 0) {
96224 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
96225 sys_close(fd);
96226diff --git a/init/init_task.c b/init/init_task.c
96227index ba0a7f36..2bcf1d5 100644
96228--- a/init/init_task.c
96229+++ b/init/init_task.c
96230@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
96231 * Initial thread structure. Alignment of this is handled by a special
96232 * linker map entry.
96233 */
96234+#ifdef CONFIG_X86
96235+union thread_union init_thread_union __init_task_data;
96236+#else
96237 union thread_union init_thread_union __init_task_data =
96238 { INIT_THREAD_INFO(init_task) };
96239+#endif
96240diff --git a/init/initramfs.c b/init/initramfs.c
96241index ad1bd77..dca2c1b 100644
96242--- a/init/initramfs.c
96243+++ b/init/initramfs.c
96244@@ -25,7 +25,7 @@ static ssize_t __init xwrite(int fd, const char *p, size_t count)
96245
96246 /* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
96247 while (count) {
96248- ssize_t rv = sys_write(fd, p, count);
96249+ ssize_t rv = sys_write(fd, (char __force_user *)p, count);
96250
96251 if (rv < 0) {
96252 if (rv == -EINTR || rv == -EAGAIN)
96253@@ -107,7 +107,7 @@ static void __init free_hash(void)
96254 }
96255 }
96256
96257-static long __init do_utime(char *filename, time_t mtime)
96258+static long __init do_utime(char __force_user *filename, time_t mtime)
96259 {
96260 struct timespec t[2];
96261
96262@@ -142,7 +142,7 @@ static void __init dir_utime(void)
96263 struct dir_entry *de, *tmp;
96264 list_for_each_entry_safe(de, tmp, &dir_list, list) {
96265 list_del(&de->list);
96266- do_utime(de->name, de->mtime);
96267+ do_utime((char __force_user *)de->name, de->mtime);
96268 kfree(de->name);
96269 kfree(de);
96270 }
96271@@ -304,7 +304,7 @@ static int __init maybe_link(void)
96272 if (nlink >= 2) {
96273 char *old = find_link(major, minor, ino, mode, collected);
96274 if (old)
96275- return (sys_link(old, collected) < 0) ? -1 : 1;
96276+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
96277 }
96278 return 0;
96279 }
96280@@ -313,11 +313,11 @@ static void __init clean_path(char *path, umode_t fmode)
96281 {
96282 struct stat st;
96283
96284- if (!sys_newlstat(path, &st) && (st.st_mode ^ fmode) & S_IFMT) {
96285+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode ^ fmode) & S_IFMT) {
96286 if (S_ISDIR(st.st_mode))
96287- sys_rmdir(path);
96288+ sys_rmdir((char __force_user *)path);
96289 else
96290- sys_unlink(path);
96291+ sys_unlink((char __force_user *)path);
96292 }
96293 }
96294
96295@@ -338,7 +338,7 @@ static int __init do_name(void)
96296 int openflags = O_WRONLY|O_CREAT;
96297 if (ml != 1)
96298 openflags |= O_TRUNC;
96299- wfd = sys_open(collected, openflags, mode);
96300+ wfd = sys_open((char __force_user *)collected, openflags, mode);
96301
96302 if (wfd >= 0) {
96303 sys_fchown(wfd, uid, gid);
96304@@ -350,17 +350,17 @@ static int __init do_name(void)
96305 }
96306 }
96307 } else if (S_ISDIR(mode)) {
96308- sys_mkdir(collected, mode);
96309- sys_chown(collected, uid, gid);
96310- sys_chmod(collected, mode);
96311+ sys_mkdir((char __force_user *)collected, mode);
96312+ sys_chown((char __force_user *)collected, uid, gid);
96313+ sys_chmod((char __force_user *)collected, mode);
96314 dir_add(collected, mtime);
96315 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
96316 S_ISFIFO(mode) || S_ISSOCK(mode)) {
96317 if (maybe_link() == 0) {
96318- sys_mknod(collected, mode, rdev);
96319- sys_chown(collected, uid, gid);
96320- sys_chmod(collected, mode);
96321- do_utime(collected, mtime);
96322+ sys_mknod((char __force_user *)collected, mode, rdev);
96323+ sys_chown((char __force_user *)collected, uid, gid);
96324+ sys_chmod((char __force_user *)collected, mode);
96325+ do_utime((char __force_user *)collected, mtime);
96326 }
96327 }
96328 return 0;
96329@@ -372,7 +372,7 @@ static int __init do_copy(void)
96330 if (xwrite(wfd, victim, body_len) != body_len)
96331 error("write error");
96332 sys_close(wfd);
96333- do_utime(vcollected, mtime);
96334+ do_utime((char __force_user *)vcollected, mtime);
96335 kfree(vcollected);
96336 eat(body_len);
96337 state = SkipIt;
96338@@ -390,9 +390,9 @@ static int __init do_symlink(void)
96339 {
96340 collected[N_ALIGN(name_len) + body_len] = '\0';
96341 clean_path(collected, 0);
96342- sys_symlink(collected + N_ALIGN(name_len), collected);
96343- sys_lchown(collected, uid, gid);
96344- do_utime(collected, mtime);
96345+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
96346+ sys_lchown((char __force_user *)collected, uid, gid);
96347+ do_utime((char __force_user *)collected, mtime);
96348 state = SkipIt;
96349 next_state = Reset;
96350 return 0;
96351diff --git a/init/main.c b/init/main.c
96352index 6f0f1c5f..a542824 100644
96353--- a/init/main.c
96354+++ b/init/main.c
96355@@ -96,6 +96,8 @@ extern void radix_tree_init(void);
96356 static inline void mark_rodata_ro(void) { }
96357 #endif
96358
96359+extern void grsecurity_init(void);
96360+
96361 /*
96362 * Debug helper: via this flag we know that we are in 'early bootup code'
96363 * where only the boot processor is running with IRQ disabled. This means
96364@@ -157,6 +159,85 @@ static int __init set_reset_devices(char *str)
96365
96366 __setup("reset_devices", set_reset_devices);
96367
96368+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
96369+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
96370+static int __init setup_grsec_proc_gid(char *str)
96371+{
96372+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
96373+ return 1;
96374+}
96375+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
96376+#endif
96377+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
96378+int grsec_enable_sysfs_restrict = 1;
96379+static int __init setup_grsec_sysfs_restrict(char *str)
96380+{
96381+ if (!simple_strtol(str, NULL, 0))
96382+ grsec_enable_sysfs_restrict = 0;
96383+ return 1;
96384+}
96385+__setup("grsec_sysfs_restrict", setup_grsec_sysfs_restrict);
96386+#endif
96387+
96388+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
96389+unsigned long pax_user_shadow_base __read_only;
96390+EXPORT_SYMBOL(pax_user_shadow_base);
96391+extern char pax_enter_kernel_user[];
96392+extern char pax_exit_kernel_user[];
96393+#endif
96394+
96395+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
96396+static int __init setup_pax_nouderef(char *str)
96397+{
96398+#ifdef CONFIG_X86_32
96399+ unsigned int cpu;
96400+ struct desc_struct *gdt;
96401+
96402+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
96403+ gdt = get_cpu_gdt_table(cpu);
96404+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
96405+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
96406+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
96407+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
96408+ }
96409+ loadsegment(ds, __KERNEL_DS);
96410+ loadsegment(es, __KERNEL_DS);
96411+ loadsegment(ss, __KERNEL_DS);
96412+#else
96413+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
96414+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
96415+ clone_pgd_mask = ~(pgdval_t)0UL;
96416+ pax_user_shadow_base = 0UL;
96417+ setup_clear_cpu_cap(X86_FEATURE_PCID);
96418+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
96419+#endif
96420+
96421+ return 0;
96422+}
96423+early_param("pax_nouderef", setup_pax_nouderef);
96424+
96425+#ifdef CONFIG_X86_64
96426+static int __init setup_pax_weakuderef(char *str)
96427+{
96428+ if (clone_pgd_mask != ~(pgdval_t)0UL)
96429+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
96430+ return 1;
96431+}
96432+__setup("pax_weakuderef", setup_pax_weakuderef);
96433+#endif
96434+#endif
96435+
96436+#ifdef CONFIG_PAX_SOFTMODE
96437+int pax_softmode;
96438+
96439+static int __init setup_pax_softmode(char *str)
96440+{
96441+ get_option(&str, &pax_softmode);
96442+ return 1;
96443+}
96444+__setup("pax_softmode=", setup_pax_softmode);
96445+#endif
96446+
96447 static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
96448 const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
96449 static const char *panic_later, *panic_param;
96450@@ -722,7 +803,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
96451 struct blacklist_entry *entry;
96452 char *fn_name;
96453
96454- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
96455+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
96456 if (!fn_name)
96457 return false;
96458
96459@@ -774,7 +855,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
96460 {
96461 int count = preempt_count();
96462 int ret;
96463- char msgbuf[64];
96464+ const char *msg1 = "", *msg2 = "";
96465
96466 if (initcall_blacklisted(fn))
96467 return -EPERM;
96468@@ -784,18 +865,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
96469 else
96470 ret = fn();
96471
96472- msgbuf[0] = 0;
96473-
96474 if (preempt_count() != count) {
96475- sprintf(msgbuf, "preemption imbalance ");
96476+ msg1 = " preemption imbalance";
96477 preempt_count_set(count);
96478 }
96479 if (irqs_disabled()) {
96480- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
96481+ msg2 = " disabled interrupts";
96482 local_irq_enable();
96483 }
96484- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
96485+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
96486
96487+ add_latent_entropy();
96488 return ret;
96489 }
96490
96491@@ -901,8 +981,8 @@ static int run_init_process(const char *init_filename)
96492 {
96493 argv_init[0] = init_filename;
96494 return do_execve(getname_kernel(init_filename),
96495- (const char __user *const __user *)argv_init,
96496- (const char __user *const __user *)envp_init);
96497+ (const char __user *const __force_user *)argv_init,
96498+ (const char __user *const __force_user *)envp_init);
96499 }
96500
96501 static int try_to_run_init_process(const char *init_filename)
96502@@ -919,6 +999,10 @@ static int try_to_run_init_process(const char *init_filename)
96503 return ret;
96504 }
96505
96506+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
96507+extern int gr_init_ran;
96508+#endif
96509+
96510 static noinline void __init kernel_init_freeable(void);
96511
96512 static int __ref kernel_init(void *unused)
96513@@ -943,6 +1027,11 @@ static int __ref kernel_init(void *unused)
96514 ramdisk_execute_command, ret);
96515 }
96516
96517+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
96518+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
96519+ gr_init_ran = 1;
96520+#endif
96521+
96522 /*
96523 * We try each of these until one succeeds.
96524 *
96525@@ -998,7 +1087,7 @@ static noinline void __init kernel_init_freeable(void)
96526 do_basic_setup();
96527
96528 /* Open the /dev/console on the rootfs, this should never fail */
96529- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
96530+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
96531 pr_err("Warning: unable to open an initial console.\n");
96532
96533 (void) sys_dup(0);
96534@@ -1011,11 +1100,13 @@ static noinline void __init kernel_init_freeable(void)
96535 if (!ramdisk_execute_command)
96536 ramdisk_execute_command = "/init";
96537
96538- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
96539+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
96540 ramdisk_execute_command = NULL;
96541 prepare_namespace();
96542 }
96543
96544+ grsecurity_init();
96545+
96546 /*
96547 * Ok, we have completed the initial bootup, and
96548 * we're essentially up and running. Get rid of the
96549diff --git a/ipc/compat.c b/ipc/compat.c
96550index 9b3c85f..5266b0f 100644
96551--- a/ipc/compat.c
96552+++ b/ipc/compat.c
96553@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
96554 COMPAT_SHMLBA);
96555 if (err < 0)
96556 return err;
96557- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
96558+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
96559 }
96560 case SHMDT:
96561 return sys_shmdt(compat_ptr(ptr));
96562@@ -747,7 +747,7 @@ COMPAT_SYSCALL_DEFINE3(shmctl, int, first, int, second, void __user *, uptr)
96563 }
96564
96565 COMPAT_SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsems,
96566- unsigned, nsops,
96567+ compat_long_t, nsops,
96568 const struct compat_timespec __user *, timeout)
96569 {
96570 struct timespec __user *ts64;
96571diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
96572index 8ad93c2..efd80f8 100644
96573--- a/ipc/ipc_sysctl.c
96574+++ b/ipc/ipc_sysctl.c
96575@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
96576 static int proc_ipc_dointvec(struct ctl_table *table, int write,
96577 void __user *buffer, size_t *lenp, loff_t *ppos)
96578 {
96579- struct ctl_table ipc_table;
96580+ ctl_table_no_const ipc_table;
96581
96582 memcpy(&ipc_table, table, sizeof(ipc_table));
96583 ipc_table.data = get_ipc(table);
96584@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
96585 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
96586 void __user *buffer, size_t *lenp, loff_t *ppos)
96587 {
96588- struct ctl_table ipc_table;
96589+ ctl_table_no_const ipc_table;
96590
96591 memcpy(&ipc_table, table, sizeof(ipc_table));
96592 ipc_table.data = get_ipc(table);
96593@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
96594 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
96595 void __user *buffer, size_t *lenp, loff_t *ppos)
96596 {
96597- struct ctl_table ipc_table;
96598+ ctl_table_no_const ipc_table;
96599 memcpy(&ipc_table, table, sizeof(ipc_table));
96600 ipc_table.data = get_ipc(table);
96601
96602@@ -76,7 +76,7 @@ static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
96603 static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
96604 void __user *buffer, size_t *lenp, loff_t *ppos)
96605 {
96606- struct ctl_table ipc_table;
96607+ ctl_table_no_const ipc_table;
96608 int dummy = 0;
96609
96610 memcpy(&ipc_table, table, sizeof(ipc_table));
96611diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
96612index 68d4e95..1477ded 100644
96613--- a/ipc/mq_sysctl.c
96614+++ b/ipc/mq_sysctl.c
96615@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
96616 static int proc_mq_dointvec(struct ctl_table *table, int write,
96617 void __user *buffer, size_t *lenp, loff_t *ppos)
96618 {
96619- struct ctl_table mq_table;
96620+ ctl_table_no_const mq_table;
96621 memcpy(&mq_table, table, sizeof(mq_table));
96622 mq_table.data = get_mq(table);
96623
96624@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
96625 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
96626 void __user *buffer, size_t *lenp, loff_t *ppos)
96627 {
96628- struct ctl_table mq_table;
96629+ ctl_table_no_const mq_table;
96630 memcpy(&mq_table, table, sizeof(mq_table));
96631 mq_table.data = get_mq(table);
96632
96633diff --git a/ipc/mqueue.c b/ipc/mqueue.c
96634index 7635a1c..7432cb6 100644
96635--- a/ipc/mqueue.c
96636+++ b/ipc/mqueue.c
96637@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
96638 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
96639 info->attr.mq_msgsize);
96640
96641+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
96642 spin_lock(&mq_lock);
96643 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
96644 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
96645diff --git a/ipc/sem.c b/ipc/sem.c
96646index 9284211..bca5b1b 100644
96647--- a/ipc/sem.c
96648+++ b/ipc/sem.c
96649@@ -1780,7 +1780,7 @@ static int get_queue_result(struct sem_queue *q)
96650 }
96651
96652 SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
96653- unsigned, nsops, const struct timespec __user *, timeout)
96654+ long, nsops, const struct timespec __user *, timeout)
96655 {
96656 int error = -EINVAL;
96657 struct sem_array *sma;
96658@@ -2015,7 +2015,7 @@ out_free:
96659 }
96660
96661 SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
96662- unsigned, nsops)
96663+ long, nsops)
96664 {
96665 return sys_semtimedop(semid, tsops, nsops, NULL);
96666 }
96667diff --git a/ipc/shm.c b/ipc/shm.c
96668index 19633b4..d454904 100644
96669--- a/ipc/shm.c
96670+++ b/ipc/shm.c
96671@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
96672 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
96673 #endif
96674
96675+#ifdef CONFIG_GRKERNSEC
96676+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
96677+ const u64 shm_createtime, const kuid_t cuid,
96678+ const int shmid);
96679+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
96680+ const u64 shm_createtime);
96681+#endif
96682+
96683 void shm_init_ns(struct ipc_namespace *ns)
96684 {
96685 ns->shm_ctlmax = SHMMAX;
96686@@ -560,6 +568,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
96687 shp->shm_lprid = 0;
96688 shp->shm_atim = shp->shm_dtim = 0;
96689 shp->shm_ctim = get_seconds();
96690+#ifdef CONFIG_GRKERNSEC
96691+ shp->shm_createtime = ktime_get_ns();
96692+#endif
96693 shp->shm_segsz = size;
96694 shp->shm_nattch = 0;
96695 shp->shm_file = file;
96696@@ -1096,6 +1107,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
96697 f_mode = FMODE_READ | FMODE_WRITE;
96698 }
96699 if (shmflg & SHM_EXEC) {
96700+
96701+#ifdef CONFIG_PAX_MPROTECT
96702+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
96703+ goto out;
96704+#endif
96705+
96706 prot |= PROT_EXEC;
96707 acc_mode |= S_IXUGO;
96708 }
96709@@ -1120,6 +1137,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
96710 if (err)
96711 goto out_unlock;
96712
96713+#ifdef CONFIG_GRKERNSEC
96714+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
96715+ shp->shm_perm.cuid, shmid) ||
96716+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
96717+ err = -EACCES;
96718+ goto out_unlock;
96719+ }
96720+#endif
96721+
96722 ipc_lock_object(&shp->shm_perm);
96723
96724 /* check if shm_destroy() is tearing down shp */
96725@@ -1132,6 +1158,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
96726 path = shp->shm_file->f_path;
96727 path_get(&path);
96728 shp->shm_nattch++;
96729+#ifdef CONFIG_GRKERNSEC
96730+ shp->shm_lapid = current->pid;
96731+#endif
96732 size = i_size_read(path.dentry->d_inode);
96733 ipc_unlock_object(&shp->shm_perm);
96734 rcu_read_unlock();
96735diff --git a/ipc/util.c b/ipc/util.c
96736index 106bed0..f851429 100644
96737--- a/ipc/util.c
96738+++ b/ipc/util.c
96739@@ -71,6 +71,8 @@ struct ipc_proc_iface {
96740 int (*show)(struct seq_file *, void *);
96741 };
96742
96743+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
96744+
96745 /**
96746 * ipc_init - initialise ipc subsystem
96747 *
96748@@ -497,6 +499,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
96749 granted_mode >>= 6;
96750 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
96751 granted_mode >>= 3;
96752+
96753+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
96754+ return -1;
96755+
96756 /* is there some bit set in requested_mode but not in granted_mode? */
96757 if ((requested_mode & ~granted_mode & 0007) &&
96758 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
96759diff --git a/kernel/audit.c b/kernel/audit.c
96760index 72ab759..757deba 100644
96761--- a/kernel/audit.c
96762+++ b/kernel/audit.c
96763@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
96764 3) suppressed due to audit_rate_limit
96765 4) suppressed due to audit_backlog_limit
96766 */
96767-static atomic_t audit_lost = ATOMIC_INIT(0);
96768+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
96769
96770 /* The netlink socket. */
96771 static struct sock *audit_sock;
96772@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
96773 unsigned long now;
96774 int print;
96775
96776- atomic_inc(&audit_lost);
96777+ atomic_inc_unchecked(&audit_lost);
96778
96779 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
96780
96781@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
96782 if (print) {
96783 if (printk_ratelimit())
96784 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
96785- atomic_read(&audit_lost),
96786+ atomic_read_unchecked(&audit_lost),
96787 audit_rate_limit,
96788 audit_backlog_limit);
96789 audit_panic(message);
96790@@ -831,7 +831,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
96791 s.pid = audit_pid;
96792 s.rate_limit = audit_rate_limit;
96793 s.backlog_limit = audit_backlog_limit;
96794- s.lost = atomic_read(&audit_lost);
96795+ s.lost = atomic_read_unchecked(&audit_lost);
96796 s.backlog = skb_queue_len(&audit_skb_queue);
96797 s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL;
96798 s.backlog_wait_time = audit_backlog_wait_time;
96799diff --git a/kernel/auditsc.c b/kernel/auditsc.c
96800index dc4ae70..14681ff 100644
96801--- a/kernel/auditsc.c
96802+++ b/kernel/auditsc.c
96803@@ -1023,7 +1023,7 @@ static int audit_log_single_execve_arg(struct audit_context *context,
96804 * for strings that are too long, we should not have created
96805 * any.
96806 */
96807- if (unlikely((len == -1) || len > MAX_ARG_STRLEN - 1)) {
96808+ if (unlikely(len > MAX_ARG_STRLEN - 1)) {
96809 WARN_ON(1);
96810 send_sig(SIGKILL, current, 0);
96811 return -1;
96812@@ -1955,7 +1955,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
96813 }
96814
96815 /* global counter which is incremented every time something logs in */
96816-static atomic_t session_id = ATOMIC_INIT(0);
96817+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
96818
96819 static int audit_set_loginuid_perm(kuid_t loginuid)
96820 {
96821@@ -2022,7 +2022,7 @@ int audit_set_loginuid(kuid_t loginuid)
96822
96823 /* are we setting or clearing? */
96824 if (uid_valid(loginuid))
96825- sessionid = (unsigned int)atomic_inc_return(&session_id);
96826+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
96827
96828 task->sessionid = sessionid;
96829 task->loginuid = loginuid;
96830diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
96831index 0c5796e..a9414e2 100644
96832--- a/kernel/bpf/core.c
96833+++ b/kernel/bpf/core.c
96834@@ -143,14 +143,17 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
96835 * random section of illegal instructions.
96836 */
96837 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
96838- hdr = module_alloc(size);
96839+ hdr = module_alloc_exec(size);
96840 if (hdr == NULL)
96841 return NULL;
96842
96843 /* Fill space with illegal/arch-dep instructions. */
96844 bpf_fill_ill_insns(hdr, size);
96845
96846+ pax_open_kernel();
96847 hdr->pages = size / PAGE_SIZE;
96848+ pax_close_kernel();
96849+
96850 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
96851 PAGE_SIZE - sizeof(*hdr));
96852 start = (prandom_u32() % hole) & ~(alignment - 1);
96853@@ -163,7 +166,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
96854
96855 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
96856 {
96857- module_memfree(hdr);
96858+ module_memfree_exec(hdr);
96859 }
96860 #endif /* CONFIG_BPF_JIT */
96861
96862diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
96863index 536edc2..d28c85d 100644
96864--- a/kernel/bpf/syscall.c
96865+++ b/kernel/bpf/syscall.c
96866@@ -548,11 +548,15 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
96867 int err;
96868
96869 /* the syscall is limited to root temporarily. This restriction will be
96870- * lifted when security audit is clean. Note that eBPF+tracing must have
96871- * this restriction, since it may pass kernel data to user space
96872+ * lifted by upstream when a half-assed security audit is clean. Note
96873+ * that eBPF+tracing must have this restriction, since it may pass
96874+ * kernel data to user space
96875 */
96876 if (!capable(CAP_SYS_ADMIN))
96877 return -EPERM;
96878+#ifdef CONFIG_GRKERNSEC
96879+ return -EPERM;
96880+#endif
96881
96882 if (!access_ok(VERIFY_READ, uattr, 1))
96883 return -EFAULT;
96884diff --git a/kernel/capability.c b/kernel/capability.c
96885index 989f5bf..d317ca0 100644
96886--- a/kernel/capability.c
96887+++ b/kernel/capability.c
96888@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
96889 * before modification is attempted and the application
96890 * fails.
96891 */
96892+ if (tocopy > ARRAY_SIZE(kdata))
96893+ return -EFAULT;
96894+
96895 if (copy_to_user(dataptr, kdata, tocopy
96896 * sizeof(struct __user_cap_data_struct))) {
96897 return -EFAULT;
96898@@ -297,10 +300,11 @@ bool has_ns_capability(struct task_struct *t,
96899 int ret;
96900
96901 rcu_read_lock();
96902- ret = security_capable(__task_cred(t), ns, cap);
96903+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
96904+ gr_task_is_capable(t, __task_cred(t), cap);
96905 rcu_read_unlock();
96906
96907- return (ret == 0);
96908+ return ret;
96909 }
96910
96911 /**
96912@@ -337,10 +341,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
96913 int ret;
96914
96915 rcu_read_lock();
96916- ret = security_capable_noaudit(__task_cred(t), ns, cap);
96917+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
96918 rcu_read_unlock();
96919
96920- return (ret == 0);
96921+ return ret;
96922 }
96923
96924 /**
96925@@ -378,7 +382,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
96926 BUG();
96927 }
96928
96929- if (security_capable(current_cred(), ns, cap) == 0) {
96930+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
96931 current->flags |= PF_SUPERPRIV;
96932 return true;
96933 }
96934@@ -386,6 +390,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
96935 }
96936 EXPORT_SYMBOL(ns_capable);
96937
96938+bool ns_capable_nolog(struct user_namespace *ns, int cap)
96939+{
96940+ if (unlikely(!cap_valid(cap))) {
96941+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
96942+ BUG();
96943+ }
96944+
96945+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
96946+ current->flags |= PF_SUPERPRIV;
96947+ return true;
96948+ }
96949+ return false;
96950+}
96951+EXPORT_SYMBOL(ns_capable_nolog);
96952+
96953 /**
96954 * file_ns_capable - Determine if the file's opener had a capability in effect
96955 * @file: The file we want to check
96956@@ -427,6 +446,12 @@ bool capable(int cap)
96957 }
96958 EXPORT_SYMBOL(capable);
96959
96960+bool capable_nolog(int cap)
96961+{
96962+ return ns_capable_nolog(&init_user_ns, cap);
96963+}
96964+EXPORT_SYMBOL(capable_nolog);
96965+
96966 /**
96967 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
96968 * @inode: The inode in question
96969@@ -444,3 +469,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
96970 kgid_has_mapping(ns, inode->i_gid);
96971 }
96972 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
96973+
96974+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
96975+{
96976+ struct user_namespace *ns = current_user_ns();
96977+
96978+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
96979+ kgid_has_mapping(ns, inode->i_gid);
96980+}
96981+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
96982diff --git a/kernel/cgroup.c b/kernel/cgroup.c
96983index 29a7b2c..a64e30a 100644
96984--- a/kernel/cgroup.c
96985+++ b/kernel/cgroup.c
96986@@ -5347,6 +5347,9 @@ static void cgroup_release_agent(struct work_struct *work)
96987 if (!pathbuf || !agentbuf)
96988 goto out;
96989
96990+ if (agentbuf[0] == '\0')
96991+ goto out;
96992+
96993 path = cgroup_path(cgrp, pathbuf, PATH_MAX);
96994 if (!path)
96995 goto out;
96996@@ -5532,7 +5535,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
96997 struct task_struct *task;
96998 int count = 0;
96999
97000- seq_printf(seq, "css_set %p\n", cset);
97001+ seq_printf(seq, "css_set %pK\n", cset);
97002
97003 list_for_each_entry(task, &cset->tasks, cg_list) {
97004 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
97005diff --git a/kernel/compat.c b/kernel/compat.c
97006index 24f0061..762ec00 100644
97007--- a/kernel/compat.c
97008+++ b/kernel/compat.c
97009@@ -13,6 +13,7 @@
97010
97011 #include <linux/linkage.h>
97012 #include <linux/compat.h>
97013+#include <linux/module.h>
97014 #include <linux/errno.h>
97015 #include <linux/time.h>
97016 #include <linux/signal.h>
97017@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
97018 mm_segment_t oldfs;
97019 long ret;
97020
97021- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
97022+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
97023 oldfs = get_fs();
97024 set_fs(KERNEL_DS);
97025 ret = hrtimer_nanosleep_restart(restart);
97026@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
97027 oldfs = get_fs();
97028 set_fs(KERNEL_DS);
97029 ret = hrtimer_nanosleep(&tu,
97030- rmtp ? (struct timespec __user *)&rmt : NULL,
97031+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
97032 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
97033 set_fs(oldfs);
97034
97035@@ -378,7 +379,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
97036 mm_segment_t old_fs = get_fs();
97037
97038 set_fs(KERNEL_DS);
97039- ret = sys_sigpending((old_sigset_t __user *) &s);
97040+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
97041 set_fs(old_fs);
97042 if (ret == 0)
97043 ret = put_user(s, set);
97044@@ -468,7 +469,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
97045 mm_segment_t old_fs = get_fs();
97046
97047 set_fs(KERNEL_DS);
97048- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
97049+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
97050 set_fs(old_fs);
97051
97052 if (!ret) {
97053@@ -550,8 +551,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
97054 set_fs (KERNEL_DS);
97055 ret = sys_wait4(pid,
97056 (stat_addr ?
97057- (unsigned int __user *) &status : NULL),
97058- options, (struct rusage __user *) &r);
97059+ (unsigned int __force_user *) &status : NULL),
97060+ options, (struct rusage __force_user *) &r);
97061 set_fs (old_fs);
97062
97063 if (ret > 0) {
97064@@ -577,8 +578,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
97065 memset(&info, 0, sizeof(info));
97066
97067 set_fs(KERNEL_DS);
97068- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
97069- uru ? (struct rusage __user *)&ru : NULL);
97070+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
97071+ uru ? (struct rusage __force_user *)&ru : NULL);
97072 set_fs(old_fs);
97073
97074 if ((ret < 0) || (info.si_signo == 0))
97075@@ -712,8 +713,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
97076 oldfs = get_fs();
97077 set_fs(KERNEL_DS);
97078 err = sys_timer_settime(timer_id, flags,
97079- (struct itimerspec __user *) &newts,
97080- (struct itimerspec __user *) &oldts);
97081+ (struct itimerspec __force_user *) &newts,
97082+ (struct itimerspec __force_user *) &oldts);
97083 set_fs(oldfs);
97084 if (!err && old && put_compat_itimerspec(old, &oldts))
97085 return -EFAULT;
97086@@ -730,7 +731,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
97087 oldfs = get_fs();
97088 set_fs(KERNEL_DS);
97089 err = sys_timer_gettime(timer_id,
97090- (struct itimerspec __user *) &ts);
97091+ (struct itimerspec __force_user *) &ts);
97092 set_fs(oldfs);
97093 if (!err && put_compat_itimerspec(setting, &ts))
97094 return -EFAULT;
97095@@ -749,7 +750,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
97096 oldfs = get_fs();
97097 set_fs(KERNEL_DS);
97098 err = sys_clock_settime(which_clock,
97099- (struct timespec __user *) &ts);
97100+ (struct timespec __force_user *) &ts);
97101 set_fs(oldfs);
97102 return err;
97103 }
97104@@ -764,7 +765,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
97105 oldfs = get_fs();
97106 set_fs(KERNEL_DS);
97107 err = sys_clock_gettime(which_clock,
97108- (struct timespec __user *) &ts);
97109+ (struct timespec __force_user *) &ts);
97110 set_fs(oldfs);
97111 if (!err && compat_put_timespec(&ts, tp))
97112 return -EFAULT;
97113@@ -784,7 +785,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
97114
97115 oldfs = get_fs();
97116 set_fs(KERNEL_DS);
97117- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
97118+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
97119 set_fs(oldfs);
97120
97121 err = compat_put_timex(utp, &txc);
97122@@ -804,7 +805,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
97123 oldfs = get_fs();
97124 set_fs(KERNEL_DS);
97125 err = sys_clock_getres(which_clock,
97126- (struct timespec __user *) &ts);
97127+ (struct timespec __force_user *) &ts);
97128 set_fs(oldfs);
97129 if (!err && tp && compat_put_timespec(&ts, tp))
97130 return -EFAULT;
97131@@ -818,7 +819,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
97132 struct timespec tu;
97133 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
97134
97135- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
97136+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
97137 oldfs = get_fs();
97138 set_fs(KERNEL_DS);
97139 err = clock_nanosleep_restart(restart);
97140@@ -850,8 +851,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
97141 oldfs = get_fs();
97142 set_fs(KERNEL_DS);
97143 err = sys_clock_nanosleep(which_clock, flags,
97144- (struct timespec __user *) &in,
97145- (struct timespec __user *) &out);
97146+ (struct timespec __force_user *) &in,
97147+ (struct timespec __force_user *) &out);
97148 set_fs(oldfs);
97149
97150 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
97151@@ -912,7 +913,8 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
97152 * bitmap. We must however ensure the end of the
97153 * kernel bitmap is zeroed.
97154 */
97155- if (nr_compat_longs-- > 0) {
97156+ if (nr_compat_longs) {
97157+ nr_compat_longs--;
97158 if (__get_user(um, umask))
97159 return -EFAULT;
97160 } else {
97161@@ -954,7 +956,8 @@ long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
97162 * We dont want to write past the end of the userspace
97163 * bitmap.
97164 */
97165- if (nr_compat_longs-- > 0) {
97166+ if (nr_compat_longs) {
97167+ nr_compat_longs--;
97168 if (__put_user(um, umask))
97169 return -EFAULT;
97170 }
97171@@ -1145,7 +1148,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
97172 mm_segment_t old_fs = get_fs();
97173
97174 set_fs(KERNEL_DS);
97175- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
97176+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
97177 set_fs(old_fs);
97178 if (compat_put_timespec(&t, interval))
97179 return -EFAULT;
97180diff --git a/kernel/configs.c b/kernel/configs.c
97181index c18b1f1..b9a0132 100644
97182--- a/kernel/configs.c
97183+++ b/kernel/configs.c
97184@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
97185 struct proc_dir_entry *entry;
97186
97187 /* create the current config file */
97188+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
97189+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
97190+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
97191+ &ikconfig_file_ops);
97192+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
97193+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
97194+ &ikconfig_file_ops);
97195+#endif
97196+#else
97197 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
97198 &ikconfig_file_ops);
97199+#endif
97200+
97201 if (!entry)
97202 return -ENOMEM;
97203
97204diff --git a/kernel/cred.c b/kernel/cred.c
97205index e0573a4..26c0fd3 100644
97206--- a/kernel/cred.c
97207+++ b/kernel/cred.c
97208@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
97209 validate_creds(cred);
97210 alter_cred_subscribers(cred, -1);
97211 put_cred(cred);
97212+
97213+#ifdef CONFIG_GRKERNSEC_SETXID
97214+ cred = (struct cred *) tsk->delayed_cred;
97215+ if (cred != NULL) {
97216+ tsk->delayed_cred = NULL;
97217+ validate_creds(cred);
97218+ alter_cred_subscribers(cred, -1);
97219+ put_cred(cred);
97220+ }
97221+#endif
97222 }
97223
97224 /**
97225@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
97226 * Always returns 0 thus allowing this function to be tail-called at the end
97227 * of, say, sys_setgid().
97228 */
97229-int commit_creds(struct cred *new)
97230+static int __commit_creds(struct cred *new)
97231 {
97232 struct task_struct *task = current;
97233 const struct cred *old = task->real_cred;
97234@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
97235
97236 get_cred(new); /* we will require a ref for the subj creds too */
97237
97238+ gr_set_role_label(task, new->uid, new->gid);
97239+
97240 /* dumpability changes */
97241 if (!uid_eq(old->euid, new->euid) ||
97242 !gid_eq(old->egid, new->egid) ||
97243@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
97244 put_cred(old);
97245 return 0;
97246 }
97247+#ifdef CONFIG_GRKERNSEC_SETXID
97248+extern int set_user(struct cred *new);
97249+
97250+void gr_delayed_cred_worker(void)
97251+{
97252+ const struct cred *new = current->delayed_cred;
97253+ struct cred *ncred;
97254+
97255+ current->delayed_cred = NULL;
97256+
97257+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
97258+ // from doing get_cred on it when queueing this
97259+ put_cred(new);
97260+ return;
97261+ } else if (new == NULL)
97262+ return;
97263+
97264+ ncred = prepare_creds();
97265+ if (!ncred)
97266+ goto die;
97267+ // uids
97268+ ncred->uid = new->uid;
97269+ ncred->euid = new->euid;
97270+ ncred->suid = new->suid;
97271+ ncred->fsuid = new->fsuid;
97272+ // gids
97273+ ncred->gid = new->gid;
97274+ ncred->egid = new->egid;
97275+ ncred->sgid = new->sgid;
97276+ ncred->fsgid = new->fsgid;
97277+ // groups
97278+ set_groups(ncred, new->group_info);
97279+ // caps
97280+ ncred->securebits = new->securebits;
97281+ ncred->cap_inheritable = new->cap_inheritable;
97282+ ncred->cap_permitted = new->cap_permitted;
97283+ ncred->cap_effective = new->cap_effective;
97284+ ncred->cap_bset = new->cap_bset;
97285+
97286+ if (set_user(ncred)) {
97287+ abort_creds(ncred);
97288+ goto die;
97289+ }
97290+
97291+ // from doing get_cred on it when queueing this
97292+ put_cred(new);
97293+
97294+ __commit_creds(ncred);
97295+ return;
97296+die:
97297+ // from doing get_cred on it when queueing this
97298+ put_cred(new);
97299+ do_group_exit(SIGKILL);
97300+}
97301+#endif
97302+
97303+int commit_creds(struct cred *new)
97304+{
97305+#ifdef CONFIG_GRKERNSEC_SETXID
97306+ int ret;
97307+ int schedule_it = 0;
97308+ struct task_struct *t;
97309+ unsigned oldsecurebits = current_cred()->securebits;
97310+
97311+ /* we won't get called with tasklist_lock held for writing
97312+ and interrupts disabled as the cred struct in that case is
97313+ init_cred
97314+ */
97315+ if (grsec_enable_setxid && !current_is_single_threaded() &&
97316+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
97317+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
97318+ schedule_it = 1;
97319+ }
97320+ ret = __commit_creds(new);
97321+ if (schedule_it) {
97322+ rcu_read_lock();
97323+ read_lock(&tasklist_lock);
97324+ for (t = next_thread(current); t != current;
97325+ t = next_thread(t)) {
97326+ /* we'll check if the thread has uid 0 in
97327+ * the delayed worker routine
97328+ */
97329+ if (task_securebits(t) == oldsecurebits &&
97330+ t->delayed_cred == NULL) {
97331+ t->delayed_cred = get_cred(new);
97332+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
97333+ set_tsk_need_resched(t);
97334+ }
97335+ }
97336+ read_unlock(&tasklist_lock);
97337+ rcu_read_unlock();
97338+ }
97339+
97340+ return ret;
97341+#else
97342+ return __commit_creds(new);
97343+#endif
97344+}
97345+
97346 EXPORT_SYMBOL(commit_creds);
97347
97348 /**
97349diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
97350index 0874e2e..5b32cc9 100644
97351--- a/kernel/debug/debug_core.c
97352+++ b/kernel/debug/debug_core.c
97353@@ -127,7 +127,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
97354 */
97355 static atomic_t masters_in_kgdb;
97356 static atomic_t slaves_in_kgdb;
97357-static atomic_t kgdb_break_tasklet_var;
97358+static atomic_unchecked_t kgdb_break_tasklet_var;
97359 atomic_t kgdb_setting_breakpoint;
97360
97361 struct task_struct *kgdb_usethread;
97362@@ -137,7 +137,7 @@ int kgdb_single_step;
97363 static pid_t kgdb_sstep_pid;
97364
97365 /* to keep track of the CPU which is doing the single stepping*/
97366-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
97367+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
97368
97369 /*
97370 * If you are debugging a problem where roundup (the collection of
97371@@ -552,7 +552,7 @@ return_normal:
97372 * kernel will only try for the value of sstep_tries before
97373 * giving up and continuing on.
97374 */
97375- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
97376+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
97377 (kgdb_info[cpu].task &&
97378 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
97379 atomic_set(&kgdb_active, -1);
97380@@ -654,8 +654,8 @@ cpu_master_loop:
97381 }
97382
97383 kgdb_restore:
97384- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
97385- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
97386+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
97387+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
97388 if (kgdb_info[sstep_cpu].task)
97389 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
97390 else
97391@@ -949,18 +949,18 @@ static void kgdb_unregister_callbacks(void)
97392 static void kgdb_tasklet_bpt(unsigned long ing)
97393 {
97394 kgdb_breakpoint();
97395- atomic_set(&kgdb_break_tasklet_var, 0);
97396+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
97397 }
97398
97399 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
97400
97401 void kgdb_schedule_breakpoint(void)
97402 {
97403- if (atomic_read(&kgdb_break_tasklet_var) ||
97404+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
97405 atomic_read(&kgdb_active) != -1 ||
97406 atomic_read(&kgdb_setting_breakpoint))
97407 return;
97408- atomic_inc(&kgdb_break_tasklet_var);
97409+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
97410 tasklet_schedule(&kgdb_tasklet_breakpoint);
97411 }
97412 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
97413diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
97414index 41213454..861e178 100644
97415--- a/kernel/debug/kdb/kdb_main.c
97416+++ b/kernel/debug/kdb/kdb_main.c
97417@@ -2021,7 +2021,7 @@ static int kdb_lsmod(int argc, const char **argv)
97418 continue;
97419
97420 kdb_printf("%-20s%8u 0x%p ", mod->name,
97421- mod->core_size, (void *)mod);
97422+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
97423 #ifdef CONFIG_MODULE_UNLOAD
97424 kdb_printf("%4d ", module_refcount(mod));
97425 #endif
97426@@ -2031,7 +2031,7 @@ static int kdb_lsmod(int argc, const char **argv)
97427 kdb_printf(" (Loading)");
97428 else
97429 kdb_printf(" (Live)");
97430- kdb_printf(" 0x%p", mod->module_core);
97431+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
97432
97433 #ifdef CONFIG_MODULE_UNLOAD
97434 {
97435diff --git a/kernel/events/core.c b/kernel/events/core.c
97436index 2fabc06..79cceec 100644
97437--- a/kernel/events/core.c
97438+++ b/kernel/events/core.c
97439@@ -170,8 +170,15 @@ static struct srcu_struct pmus_srcu;
97440 * 0 - disallow raw tracepoint access for unpriv
97441 * 1 - disallow cpu events for unpriv
97442 * 2 - disallow kernel profiling for unpriv
97443+ * 3 - disallow all unpriv perf event use
97444 */
97445-int sysctl_perf_event_paranoid __read_mostly = 1;
97446+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
97447+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
97448+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
97449+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
97450+#else
97451+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
97452+#endif
97453
97454 /* Minimum for 512 kiB + 1 user control page */
97455 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
97456@@ -197,7 +204,7 @@ void update_perf_cpu_limits(void)
97457
97458 tmp *= sysctl_perf_cpu_time_max_percent;
97459 do_div(tmp, 100);
97460- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
97461+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
97462 }
97463
97464 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
97465@@ -303,7 +310,7 @@ void perf_sample_event_took(u64 sample_len_ns)
97466 }
97467 }
97468
97469-static atomic64_t perf_event_id;
97470+static atomic64_unchecked_t perf_event_id;
97471
97472 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
97473 enum event_type_t event_type);
97474@@ -3220,7 +3227,7 @@ static void __perf_event_read(void *info)
97475
97476 static inline u64 perf_event_count(struct perf_event *event)
97477 {
97478- return local64_read(&event->count) + atomic64_read(&event->child_count);
97479+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
97480 }
97481
97482 static u64 perf_event_read(struct perf_event *event)
97483@@ -3656,9 +3663,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
97484 mutex_lock(&event->child_mutex);
97485 total += perf_event_read(event);
97486 *enabled += event->total_time_enabled +
97487- atomic64_read(&event->child_total_time_enabled);
97488+ atomic64_read_unchecked(&event->child_total_time_enabled);
97489 *running += event->total_time_running +
97490- atomic64_read(&event->child_total_time_running);
97491+ atomic64_read_unchecked(&event->child_total_time_running);
97492
97493 list_for_each_entry(child, &event->child_list, child_list) {
97494 total += perf_event_read(child);
97495@@ -4147,10 +4154,10 @@ void perf_event_update_userpage(struct perf_event *event)
97496 userpg->offset -= local64_read(&event->hw.prev_count);
97497
97498 userpg->time_enabled = enabled +
97499- atomic64_read(&event->child_total_time_enabled);
97500+ atomic64_read_unchecked(&event->child_total_time_enabled);
97501
97502 userpg->time_running = running +
97503- atomic64_read(&event->child_total_time_running);
97504+ atomic64_read_unchecked(&event->child_total_time_running);
97505
97506 arch_perf_update_userpage(event, userpg, now);
97507
97508@@ -4740,7 +4747,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
97509
97510 /* Data. */
97511 sp = perf_user_stack_pointer(regs);
97512- rem = __output_copy_user(handle, (void *) sp, dump_size);
97513+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
97514 dyn_size = dump_size - rem;
97515
97516 perf_output_skip(handle, rem);
97517@@ -4831,11 +4838,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
97518 values[n++] = perf_event_count(event);
97519 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
97520 values[n++] = enabled +
97521- atomic64_read(&event->child_total_time_enabled);
97522+ atomic64_read_unchecked(&event->child_total_time_enabled);
97523 }
97524 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
97525 values[n++] = running +
97526- atomic64_read(&event->child_total_time_running);
97527+ atomic64_read_unchecked(&event->child_total_time_running);
97528 }
97529 if (read_format & PERF_FORMAT_ID)
97530 values[n++] = primary_event_id(event);
97531@@ -7180,7 +7187,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
97532 event->parent = parent_event;
97533
97534 event->ns = get_pid_ns(task_active_pid_ns(current));
97535- event->id = atomic64_inc_return(&perf_event_id);
97536+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
97537
97538 event->state = PERF_EVENT_STATE_INACTIVE;
97539
97540@@ -7470,6 +7477,11 @@ SYSCALL_DEFINE5(perf_event_open,
97541 if (flags & ~PERF_FLAG_ALL)
97542 return -EINVAL;
97543
97544+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
97545+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
97546+ return -EACCES;
97547+#endif
97548+
97549 err = perf_copy_attr(attr_uptr, &attr);
97550 if (err)
97551 return err;
97552@@ -7892,10 +7904,10 @@ static void sync_child_event(struct perf_event *child_event,
97553 /*
97554 * Add back the child's count to the parent's count:
97555 */
97556- atomic64_add(child_val, &parent_event->child_count);
97557- atomic64_add(child_event->total_time_enabled,
97558+ atomic64_add_unchecked(child_val, &parent_event->child_count);
97559+ atomic64_add_unchecked(child_event->total_time_enabled,
97560 &parent_event->child_total_time_enabled);
97561- atomic64_add(child_event->total_time_running,
97562+ atomic64_add_unchecked(child_event->total_time_running,
97563 &parent_event->child_total_time_running);
97564
97565 /*
97566diff --git a/kernel/events/internal.h b/kernel/events/internal.h
97567index 569b2187..19940d9 100644
97568--- a/kernel/events/internal.h
97569+++ b/kernel/events/internal.h
97570@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
97571 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
97572 }
97573
97574-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
97575+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
97576 static inline unsigned long \
97577 func_name(struct perf_output_handle *handle, \
97578- const void *buf, unsigned long len) \
97579+ const void user *buf, unsigned long len) \
97580 { \
97581 unsigned long size, written; \
97582 \
97583@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
97584 return 0;
97585 }
97586
97587-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
97588+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
97589
97590 static inline unsigned long
97591 memcpy_skip(void *dst, const void *src, unsigned long n)
97592@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
97593 return 0;
97594 }
97595
97596-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
97597+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
97598
97599 #ifndef arch_perf_out_copy_user
97600 #define arch_perf_out_copy_user arch_perf_out_copy_user
97601@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
97602 }
97603 #endif
97604
97605-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
97606+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
97607
97608 /* Callchain handling */
97609 extern struct perf_callchain_entry *
97610diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
97611index cb346f2..e4dc317 100644
97612--- a/kernel/events/uprobes.c
97613+++ b/kernel/events/uprobes.c
97614@@ -1670,7 +1670,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
97615 {
97616 struct page *page;
97617 uprobe_opcode_t opcode;
97618- int result;
97619+ long result;
97620
97621 pagefault_disable();
97622 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
97623diff --git a/kernel/exit.c b/kernel/exit.c
97624index feff10b..f623dd5 100644
97625--- a/kernel/exit.c
97626+++ b/kernel/exit.c
97627@@ -171,6 +171,10 @@ void release_task(struct task_struct *p)
97628 struct task_struct *leader;
97629 int zap_leader;
97630 repeat:
97631+#ifdef CONFIG_NET
97632+ gr_del_task_from_ip_table(p);
97633+#endif
97634+
97635 /* don't need to get the RCU readlock here - the process is dead and
97636 * can't be modifying its own credentials. But shut RCU-lockdep up */
97637 rcu_read_lock();
97638@@ -656,6 +660,8 @@ void do_exit(long code)
97639 int group_dead;
97640 TASKS_RCU(int tasks_rcu_i);
97641
97642+ set_fs(USER_DS);
97643+
97644 profile_task_exit(tsk);
97645
97646 WARN_ON(blk_needs_flush_plug(tsk));
97647@@ -672,7 +678,6 @@ void do_exit(long code)
97648 * mm_release()->clear_child_tid() from writing to a user-controlled
97649 * kernel address.
97650 */
97651- set_fs(USER_DS);
97652
97653 ptrace_event(PTRACE_EVENT_EXIT, code);
97654
97655@@ -730,6 +735,9 @@ void do_exit(long code)
97656 tsk->exit_code = code;
97657 taskstats_exit(tsk, group_dead);
97658
97659+ gr_acl_handle_psacct(tsk, code);
97660+ gr_acl_handle_exit();
97661+
97662 exit_mm(tsk);
97663
97664 if (group_dead)
97665@@ -849,7 +857,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
97666 * Take down every thread in the group. This is called by fatal signals
97667 * as well as by sys_exit_group (below).
97668 */
97669-void
97670+__noreturn void
97671 do_group_exit(int exit_code)
97672 {
97673 struct signal_struct *sig = current->signal;
97674diff --git a/kernel/fork.c b/kernel/fork.c
97675index cf65139..704476e 100644
97676--- a/kernel/fork.c
97677+++ b/kernel/fork.c
97678@@ -177,12 +177,54 @@ static void free_thread_info(struct thread_info *ti)
97679 void thread_info_cache_init(void)
97680 {
97681 thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
97682- THREAD_SIZE, 0, NULL);
97683+ THREAD_SIZE, SLAB_USERCOPY, NULL);
97684 BUG_ON(thread_info_cache == NULL);
97685 }
97686 # endif
97687 #endif
97688
97689+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
97690+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
97691+ int node, void **lowmem_stack)
97692+{
97693+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
97694+ void *ret = NULL;
97695+ unsigned int i;
97696+
97697+ *lowmem_stack = alloc_thread_info_node(tsk, node);
97698+ if (*lowmem_stack == NULL)
97699+ goto out;
97700+
97701+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
97702+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
97703+
97704+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
97705+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
97706+ if (ret == NULL) {
97707+ free_thread_info(*lowmem_stack);
97708+ *lowmem_stack = NULL;
97709+ }
97710+
97711+out:
97712+ return ret;
97713+}
97714+
97715+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
97716+{
97717+ unmap_process_stacks(tsk);
97718+}
97719+#else
97720+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
97721+ int node, void **lowmem_stack)
97722+{
97723+ return alloc_thread_info_node(tsk, node);
97724+}
97725+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
97726+{
97727+ free_thread_info(ti);
97728+}
97729+#endif
97730+
97731 /* SLAB cache for signal_struct structures (tsk->signal) */
97732 static struct kmem_cache *signal_cachep;
97733
97734@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
97735 /* SLAB cache for mm_struct structures (tsk->mm) */
97736 static struct kmem_cache *mm_cachep;
97737
97738-static void account_kernel_stack(struct thread_info *ti, int account)
97739+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
97740 {
97741+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
97742+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
97743+#else
97744 struct zone *zone = page_zone(virt_to_page(ti));
97745+#endif
97746
97747 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
97748 }
97749
97750 void free_task(struct task_struct *tsk)
97751 {
97752- account_kernel_stack(tsk->stack, -1);
97753+ account_kernel_stack(tsk, tsk->stack, -1);
97754 arch_release_thread_info(tsk->stack);
97755- free_thread_info(tsk->stack);
97756+ gr_free_thread_info(tsk, tsk->stack);
97757 rt_mutex_debug_task_free(tsk);
97758 ftrace_graph_exit_task(tsk);
97759 put_seccomp_filter(tsk);
97760@@ -306,6 +352,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
97761 {
97762 struct task_struct *tsk;
97763 struct thread_info *ti;
97764+ void *lowmem_stack;
97765 int node = tsk_fork_get_node(orig);
97766 int err;
97767
97768@@ -313,7 +360,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
97769 if (!tsk)
97770 return NULL;
97771
97772- ti = alloc_thread_info_node(tsk, node);
97773+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
97774 if (!ti)
97775 goto free_tsk;
97776
97777@@ -322,6 +369,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
97778 goto free_ti;
97779
97780 tsk->stack = ti;
97781+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
97782+ tsk->lowmem_stack = lowmem_stack;
97783+#endif
97784 #ifdef CONFIG_SECCOMP
97785 /*
97786 * We must handle setting up seccomp filters once we're under
97787@@ -338,7 +388,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
97788 set_task_stack_end_magic(tsk);
97789
97790 #ifdef CONFIG_CC_STACKPROTECTOR
97791- tsk->stack_canary = get_random_int();
97792+ tsk->stack_canary = pax_get_random_long();
97793 #endif
97794
97795 /*
97796@@ -352,24 +402,89 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
97797 tsk->splice_pipe = NULL;
97798 tsk->task_frag.page = NULL;
97799
97800- account_kernel_stack(ti, 1);
97801+ account_kernel_stack(tsk, ti, 1);
97802
97803 return tsk;
97804
97805 free_ti:
97806- free_thread_info(ti);
97807+ gr_free_thread_info(tsk, ti);
97808 free_tsk:
97809 free_task_struct(tsk);
97810 return NULL;
97811 }
97812
97813 #ifdef CONFIG_MMU
97814-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
97815+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
97816+{
97817+ struct vm_area_struct *tmp;
97818+ unsigned long charge;
97819+ struct file *file;
97820+ int retval;
97821+
97822+ charge = 0;
97823+ if (mpnt->vm_flags & VM_ACCOUNT) {
97824+ unsigned long len = vma_pages(mpnt);
97825+
97826+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
97827+ goto fail_nomem;
97828+ charge = len;
97829+ }
97830+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97831+ if (!tmp)
97832+ goto fail_nomem;
97833+ *tmp = *mpnt;
97834+ tmp->vm_mm = mm;
97835+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
97836+ retval = vma_dup_policy(mpnt, tmp);
97837+ if (retval)
97838+ goto fail_nomem_policy;
97839+ if (anon_vma_fork(tmp, mpnt))
97840+ goto fail_nomem_anon_vma_fork;
97841+ tmp->vm_flags &= ~VM_LOCKED;
97842+ tmp->vm_next = tmp->vm_prev = NULL;
97843+ tmp->vm_mirror = NULL;
97844+ file = tmp->vm_file;
97845+ if (file) {
97846+ struct inode *inode = file_inode(file);
97847+ struct address_space *mapping = file->f_mapping;
97848+
97849+ get_file(file);
97850+ if (tmp->vm_flags & VM_DENYWRITE)
97851+ atomic_dec(&inode->i_writecount);
97852+ i_mmap_lock_write(mapping);
97853+ if (tmp->vm_flags & VM_SHARED)
97854+ atomic_inc(&mapping->i_mmap_writable);
97855+ flush_dcache_mmap_lock(mapping);
97856+ /* insert tmp into the share list, just after mpnt */
97857+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
97858+ flush_dcache_mmap_unlock(mapping);
97859+ i_mmap_unlock_write(mapping);
97860+ }
97861+
97862+ /*
97863+ * Clear hugetlb-related page reserves for children. This only
97864+ * affects MAP_PRIVATE mappings. Faults generated by the child
97865+ * are not guaranteed to succeed, even if read-only
97866+ */
97867+ if (is_vm_hugetlb_page(tmp))
97868+ reset_vma_resv_huge_pages(tmp);
97869+
97870+ return tmp;
97871+
97872+fail_nomem_anon_vma_fork:
97873+ mpol_put(vma_policy(tmp));
97874+fail_nomem_policy:
97875+ kmem_cache_free(vm_area_cachep, tmp);
97876+fail_nomem:
97877+ vm_unacct_memory(charge);
97878+ return NULL;
97879+}
97880+
97881+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
97882 {
97883 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
97884 struct rb_node **rb_link, *rb_parent;
97885 int retval;
97886- unsigned long charge;
97887
97888 uprobe_start_dup_mmap();
97889 down_write(&oldmm->mmap_sem);
97890@@ -397,51 +512,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
97891
97892 prev = NULL;
97893 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
97894- struct file *file;
97895-
97896 if (mpnt->vm_flags & VM_DONTCOPY) {
97897 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
97898 -vma_pages(mpnt));
97899 continue;
97900 }
97901- charge = 0;
97902- if (mpnt->vm_flags & VM_ACCOUNT) {
97903- unsigned long len = vma_pages(mpnt);
97904-
97905- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
97906- goto fail_nomem;
97907- charge = len;
97908- }
97909- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97910- if (!tmp)
97911- goto fail_nomem;
97912- *tmp = *mpnt;
97913- INIT_LIST_HEAD(&tmp->anon_vma_chain);
97914- retval = vma_dup_policy(mpnt, tmp);
97915- if (retval)
97916- goto fail_nomem_policy;
97917- tmp->vm_mm = mm;
97918- if (anon_vma_fork(tmp, mpnt))
97919- goto fail_nomem_anon_vma_fork;
97920- tmp->vm_flags &= ~VM_LOCKED;
97921- tmp->vm_next = tmp->vm_prev = NULL;
97922- file = tmp->vm_file;
97923- if (file) {
97924- struct inode *inode = file_inode(file);
97925- struct address_space *mapping = file->f_mapping;
97926-
97927- get_file(file);
97928- if (tmp->vm_flags & VM_DENYWRITE)
97929- atomic_dec(&inode->i_writecount);
97930- i_mmap_lock_write(mapping);
97931- if (tmp->vm_flags & VM_SHARED)
97932- atomic_inc(&mapping->i_mmap_writable);
97933- flush_dcache_mmap_lock(mapping);
97934- /* insert tmp into the share list, just after mpnt */
97935- vma_interval_tree_insert_after(tmp, mpnt,
97936- &mapping->i_mmap);
97937- flush_dcache_mmap_unlock(mapping);
97938- i_mmap_unlock_write(mapping);
97939+ tmp = dup_vma(mm, oldmm, mpnt);
97940+ if (!tmp) {
97941+ retval = -ENOMEM;
97942+ goto out;
97943 }
97944
97945 /*
97946@@ -473,6 +552,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
97947 if (retval)
97948 goto out;
97949 }
97950+
97951+#ifdef CONFIG_PAX_SEGMEXEC
97952+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
97953+ struct vm_area_struct *mpnt_m;
97954+
97955+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
97956+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
97957+
97958+ if (!mpnt->vm_mirror)
97959+ continue;
97960+
97961+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
97962+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
97963+ mpnt->vm_mirror = mpnt_m;
97964+ } else {
97965+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
97966+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
97967+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
97968+ mpnt->vm_mirror->vm_mirror = mpnt;
97969+ }
97970+ }
97971+ BUG_ON(mpnt_m);
97972+ }
97973+#endif
97974+
97975 /* a new mm has just been created */
97976 arch_dup_mmap(oldmm, mm);
97977 retval = 0;
97978@@ -482,14 +586,6 @@ out:
97979 up_write(&oldmm->mmap_sem);
97980 uprobe_end_dup_mmap();
97981 return retval;
97982-fail_nomem_anon_vma_fork:
97983- mpol_put(vma_policy(tmp));
97984-fail_nomem_policy:
97985- kmem_cache_free(vm_area_cachep, tmp);
97986-fail_nomem:
97987- retval = -ENOMEM;
97988- vm_unacct_memory(charge);
97989- goto out;
97990 }
97991
97992 static inline int mm_alloc_pgd(struct mm_struct *mm)
97993@@ -739,8 +835,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
97994 return ERR_PTR(err);
97995
97996 mm = get_task_mm(task);
97997- if (mm && mm != current->mm &&
97998- !ptrace_may_access(task, mode)) {
97999+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
98000+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
98001 mmput(mm);
98002 mm = ERR_PTR(-EACCES);
98003 }
98004@@ -943,13 +1039,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
98005 spin_unlock(&fs->lock);
98006 return -EAGAIN;
98007 }
98008- fs->users++;
98009+ atomic_inc(&fs->users);
98010 spin_unlock(&fs->lock);
98011 return 0;
98012 }
98013 tsk->fs = copy_fs_struct(fs);
98014 if (!tsk->fs)
98015 return -ENOMEM;
98016+ /* Carry through gr_chroot_dentry and is_chrooted instead
98017+ of recomputing it here. Already copied when the task struct
98018+ is duplicated. This allows pivot_root to not be treated as
98019+ a chroot
98020+ */
98021+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
98022+
98023 return 0;
98024 }
98025
98026@@ -1187,7 +1290,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
98027 * parts of the process environment (as per the clone
98028 * flags). The actual kick-off is left to the caller.
98029 */
98030-static struct task_struct *copy_process(unsigned long clone_flags,
98031+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
98032 unsigned long stack_start,
98033 unsigned long stack_size,
98034 int __user *child_tidptr,
98035@@ -1258,6 +1361,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
98036 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
98037 #endif
98038 retval = -EAGAIN;
98039+
98040+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
98041+
98042 if (atomic_read(&p->real_cred->user->processes) >=
98043 task_rlimit(p, RLIMIT_NPROC)) {
98044 if (p->real_cred->user != INIT_USER &&
98045@@ -1507,6 +1613,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
98046 goto bad_fork_free_pid;
98047 }
98048
98049+ /* synchronizes with gr_set_acls()
98050+ we need to call this past the point of no return for fork()
98051+ */
98052+ gr_copy_label(p);
98053+
98054 if (likely(p->pid)) {
98055 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
98056
98057@@ -1597,6 +1708,8 @@ bad_fork_cleanup_count:
98058 bad_fork_free:
98059 free_task(p);
98060 fork_out:
98061+ gr_log_forkfail(retval);
98062+
98063 return ERR_PTR(retval);
98064 }
98065
98066@@ -1658,6 +1771,7 @@ long do_fork(unsigned long clone_flags,
98067
98068 p = copy_process(clone_flags, stack_start, stack_size,
98069 child_tidptr, NULL, trace);
98070+ add_latent_entropy();
98071 /*
98072 * Do this prior waking up the new thread - the thread pointer
98073 * might get invalid after that point, if the thread exits quickly.
98074@@ -1674,6 +1788,8 @@ long do_fork(unsigned long clone_flags,
98075 if (clone_flags & CLONE_PARENT_SETTID)
98076 put_user(nr, parent_tidptr);
98077
98078+ gr_handle_brute_check();
98079+
98080 if (clone_flags & CLONE_VFORK) {
98081 p->vfork_done = &vfork;
98082 init_completion(&vfork);
98083@@ -1792,7 +1908,7 @@ void __init proc_caches_init(void)
98084 mm_cachep = kmem_cache_create("mm_struct",
98085 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
98086 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
98087- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
98088+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
98089 mmap_init();
98090 nsproxy_cache_init();
98091 }
98092@@ -1832,7 +1948,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
98093 return 0;
98094
98095 /* don't need lock here; in the worst case we'll do useless copy */
98096- if (fs->users == 1)
98097+ if (atomic_read(&fs->users) == 1)
98098 return 0;
98099
98100 *new_fsp = copy_fs_struct(fs);
98101@@ -1944,7 +2060,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
98102 fs = current->fs;
98103 spin_lock(&fs->lock);
98104 current->fs = new_fs;
98105- if (--fs->users)
98106+ gr_set_chroot_entries(current, &current->fs->root);
98107+ if (atomic_dec_return(&fs->users))
98108 new_fs = NULL;
98109 else
98110 new_fs = fs;
98111diff --git a/kernel/futex.c b/kernel/futex.c
98112index 2a5e383..878bac6 100644
98113--- a/kernel/futex.c
98114+++ b/kernel/futex.c
98115@@ -201,7 +201,7 @@ struct futex_pi_state {
98116 atomic_t refcount;
98117
98118 union futex_key key;
98119-};
98120+} __randomize_layout;
98121
98122 /**
98123 * struct futex_q - The hashed futex queue entry, one per waiting task
98124@@ -235,7 +235,7 @@ struct futex_q {
98125 struct rt_mutex_waiter *rt_waiter;
98126 union futex_key *requeue_pi_key;
98127 u32 bitset;
98128-};
98129+} __randomize_layout;
98130
98131 static const struct futex_q futex_q_init = {
98132 /* list gets initialized in queue_me()*/
98133@@ -402,6 +402,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
98134 struct page *page, *page_head;
98135 int err, ro = 0;
98136
98137+#ifdef CONFIG_PAX_SEGMEXEC
98138+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
98139+ return -EFAULT;
98140+#endif
98141+
98142 /*
98143 * The futex address must be "naturally" aligned.
98144 */
98145@@ -601,7 +606,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
98146
98147 static int get_futex_value_locked(u32 *dest, u32 __user *from)
98148 {
98149- int ret;
98150+ unsigned long ret;
98151
98152 pagefault_disable();
98153 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
98154@@ -3006,6 +3011,7 @@ static void __init futex_detect_cmpxchg(void)
98155 {
98156 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
98157 u32 curval;
98158+ mm_segment_t oldfs;
98159
98160 /*
98161 * This will fail and we want it. Some arch implementations do
98162@@ -3017,8 +3023,11 @@ static void __init futex_detect_cmpxchg(void)
98163 * implementation, the non-functional ones will return
98164 * -ENOSYS.
98165 */
98166+ oldfs = get_fs();
98167+ set_fs(USER_DS);
98168 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
98169 futex_cmpxchg_enabled = 1;
98170+ set_fs(oldfs);
98171 #endif
98172 }
98173
98174diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
98175index 55c8c93..9ba7ad6 100644
98176--- a/kernel/futex_compat.c
98177+++ b/kernel/futex_compat.c
98178@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
98179 return 0;
98180 }
98181
98182-static void __user *futex_uaddr(struct robust_list __user *entry,
98183+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
98184 compat_long_t futex_offset)
98185 {
98186 compat_uptr_t base = ptr_to_compat(entry);
98187diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
98188index b358a80..fc25240 100644
98189--- a/kernel/gcov/base.c
98190+++ b/kernel/gcov/base.c
98191@@ -114,11 +114,6 @@ void gcov_enable_events(void)
98192 }
98193
98194 #ifdef CONFIG_MODULES
98195-static inline int within(void *addr, void *start, unsigned long size)
98196-{
98197- return ((addr >= start) && (addr < start + size));
98198-}
98199-
98200 /* Update list and generate events when modules are unloaded. */
98201 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
98202 void *data)
98203@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
98204
98205 /* Remove entries located in module from linked list. */
98206 while ((info = gcov_info_next(info))) {
98207- if (within(info, mod->module_core, mod->core_size)) {
98208+ if (within_module_core_rw((unsigned long)info, mod)) {
98209 gcov_info_unlink(prev, info);
98210 if (gcov_events_enabled)
98211 gcov_event(GCOV_REMOVE, info);
98212diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
98213index 886d09e..c7ff4e5 100644
98214--- a/kernel/irq/manage.c
98215+++ b/kernel/irq/manage.c
98216@@ -874,7 +874,7 @@ static int irq_thread(void *data)
98217
98218 action_ret = handler_fn(desc, action);
98219 if (action_ret == IRQ_HANDLED)
98220- atomic_inc(&desc->threads_handled);
98221+ atomic_inc_unchecked(&desc->threads_handled);
98222
98223 wake_threads_waitq(desc);
98224 }
98225diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
98226index e2514b0..de3dfe0 100644
98227--- a/kernel/irq/spurious.c
98228+++ b/kernel/irq/spurious.c
98229@@ -337,7 +337,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
98230 * count. We just care about the count being
98231 * different than the one we saw before.
98232 */
98233- handled = atomic_read(&desc->threads_handled);
98234+ handled = atomic_read_unchecked(&desc->threads_handled);
98235 handled |= SPURIOUS_DEFERRED;
98236 if (handled != desc->threads_handled_last) {
98237 action_ret = IRQ_HANDLED;
98238diff --git a/kernel/jump_label.c b/kernel/jump_label.c
98239index 9019f15..9a3c42e 100644
98240--- a/kernel/jump_label.c
98241+++ b/kernel/jump_label.c
98242@@ -14,6 +14,7 @@
98243 #include <linux/err.h>
98244 #include <linux/static_key.h>
98245 #include <linux/jump_label_ratelimit.h>
98246+#include <linux/mm.h>
98247
98248 #ifdef HAVE_JUMP_LABEL
98249
98250@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
98251
98252 size = (((unsigned long)stop - (unsigned long)start)
98253 / sizeof(struct jump_entry));
98254+ pax_open_kernel();
98255 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
98256+ pax_close_kernel();
98257 }
98258
98259 static void jump_label_update(struct static_key *key, int enable);
98260@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
98261 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
98262 struct jump_entry *iter;
98263
98264+ pax_open_kernel();
98265 for (iter = iter_start; iter < iter_stop; iter++) {
98266 if (within_module_init(iter->code, mod))
98267 iter->code = 0;
98268 }
98269+ pax_close_kernel();
98270 }
98271
98272 static int
98273diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
98274index 5c5987f..bc502b0 100644
98275--- a/kernel/kallsyms.c
98276+++ b/kernel/kallsyms.c
98277@@ -11,6 +11,9 @@
98278 * Changed the compression method from stem compression to "table lookup"
98279 * compression (see scripts/kallsyms.c for a more complete description)
98280 */
98281+#ifdef CONFIG_GRKERNSEC_HIDESYM
98282+#define __INCLUDED_BY_HIDESYM 1
98283+#endif
98284 #include <linux/kallsyms.h>
98285 #include <linux/module.h>
98286 #include <linux/init.h>
98287@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
98288
98289 static inline int is_kernel_inittext(unsigned long addr)
98290 {
98291+ if (system_state != SYSTEM_BOOTING)
98292+ return 0;
98293+
98294 if (addr >= (unsigned long)_sinittext
98295 && addr <= (unsigned long)_einittext)
98296 return 1;
98297 return 0;
98298 }
98299
98300+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
98301+#ifdef CONFIG_MODULES
98302+static inline int is_module_text(unsigned long addr)
98303+{
98304+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
98305+ return 1;
98306+
98307+ addr = ktla_ktva(addr);
98308+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
98309+}
98310+#else
98311+static inline int is_module_text(unsigned long addr)
98312+{
98313+ return 0;
98314+}
98315+#endif
98316+#endif
98317+
98318 static inline int is_kernel_text(unsigned long addr)
98319 {
98320 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
98321@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
98322
98323 static inline int is_kernel(unsigned long addr)
98324 {
98325+
98326+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
98327+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
98328+ return 1;
98329+
98330+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
98331+#else
98332 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
98333+#endif
98334+
98335 return 1;
98336 return in_gate_area_no_mm(addr);
98337 }
98338
98339 static int is_ksym_addr(unsigned long addr)
98340 {
98341+
98342+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
98343+ if (is_module_text(addr))
98344+ return 0;
98345+#endif
98346+
98347 if (all_var)
98348 return is_kernel(addr);
98349
98350@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
98351
98352 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
98353 {
98354- iter->name[0] = '\0';
98355 iter->nameoff = get_symbol_offset(new_pos);
98356 iter->pos = new_pos;
98357 }
98358@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
98359 {
98360 struct kallsym_iter *iter = m->private;
98361
98362+#ifdef CONFIG_GRKERNSEC_HIDESYM
98363+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
98364+ return 0;
98365+#endif
98366+
98367 /* Some debugging symbols have no name. Ignore them. */
98368 if (!iter->name[0])
98369 return 0;
98370@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
98371 */
98372 type = iter->exported ? toupper(iter->type) :
98373 tolower(iter->type);
98374+
98375 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
98376 type, iter->name, iter->module_name);
98377 } else
98378diff --git a/kernel/kcmp.c b/kernel/kcmp.c
98379index 0aa69ea..a7fcafb 100644
98380--- a/kernel/kcmp.c
98381+++ b/kernel/kcmp.c
98382@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
98383 struct task_struct *task1, *task2;
98384 int ret;
98385
98386+#ifdef CONFIG_GRKERNSEC
98387+ return -ENOSYS;
98388+#endif
98389+
98390 rcu_read_lock();
98391
98392 /*
98393diff --git a/kernel/kexec.c b/kernel/kexec.c
98394index 38c25b1..12b3f69 100644
98395--- a/kernel/kexec.c
98396+++ b/kernel/kexec.c
98397@@ -1348,7 +1348,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
98398 compat_ulong_t, flags)
98399 {
98400 struct compat_kexec_segment in;
98401- struct kexec_segment out, __user *ksegments;
98402+ struct kexec_segment out;
98403+ struct kexec_segment __user *ksegments;
98404 unsigned long i, result;
98405
98406 /* Don't allow clients that don't understand the native
98407diff --git a/kernel/kmod.c b/kernel/kmod.c
98408index 2777f40..a689506 100644
98409--- a/kernel/kmod.c
98410+++ b/kernel/kmod.c
98411@@ -68,7 +68,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
98412 kfree(info->argv);
98413 }
98414
98415-static int call_modprobe(char *module_name, int wait)
98416+static int call_modprobe(char *module_name, char *module_param, int wait)
98417 {
98418 struct subprocess_info *info;
98419 static char *envp[] = {
98420@@ -78,7 +78,7 @@ static int call_modprobe(char *module_name, int wait)
98421 NULL
98422 };
98423
98424- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
98425+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
98426 if (!argv)
98427 goto out;
98428
98429@@ -90,7 +90,8 @@ static int call_modprobe(char *module_name, int wait)
98430 argv[1] = "-q";
98431 argv[2] = "--";
98432 argv[3] = module_name; /* check free_modprobe_argv() */
98433- argv[4] = NULL;
98434+ argv[4] = module_param;
98435+ argv[5] = NULL;
98436
98437 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
98438 NULL, free_modprobe_argv, NULL);
98439@@ -122,9 +123,8 @@ out:
98440 * If module auto-loading support is disabled then this function
98441 * becomes a no-operation.
98442 */
98443-int __request_module(bool wait, const char *fmt, ...)
98444+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
98445 {
98446- va_list args;
98447 char module_name[MODULE_NAME_LEN];
98448 unsigned int max_modprobes;
98449 int ret;
98450@@ -143,9 +143,7 @@ int __request_module(bool wait, const char *fmt, ...)
98451 if (!modprobe_path[0])
98452 return 0;
98453
98454- va_start(args, fmt);
98455- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
98456- va_end(args);
98457+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
98458 if (ret >= MODULE_NAME_LEN)
98459 return -ENAMETOOLONG;
98460
98461@@ -153,6 +151,20 @@ int __request_module(bool wait, const char *fmt, ...)
98462 if (ret)
98463 return ret;
98464
98465+#ifdef CONFIG_GRKERNSEC_MODHARDEN
98466+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
98467+ /* hack to workaround consolekit/udisks stupidity */
98468+ read_lock(&tasklist_lock);
98469+ if (!strcmp(current->comm, "mount") &&
98470+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
98471+ read_unlock(&tasklist_lock);
98472+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
98473+ return -EPERM;
98474+ }
98475+ read_unlock(&tasklist_lock);
98476+ }
98477+#endif
98478+
98479 /* If modprobe needs a service that is in a module, we get a recursive
98480 * loop. Limit the number of running kmod threads to max_threads/2 or
98481 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
98482@@ -181,16 +193,61 @@ int __request_module(bool wait, const char *fmt, ...)
98483
98484 trace_module_request(module_name, wait, _RET_IP_);
98485
98486- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
98487+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
98488
98489 atomic_dec(&kmod_concurrent);
98490 return ret;
98491 }
98492+
98493+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
98494+{
98495+ va_list args;
98496+ int ret;
98497+
98498+ va_start(args, fmt);
98499+ ret = ____request_module(wait, module_param, fmt, args);
98500+ va_end(args);
98501+
98502+ return ret;
98503+}
98504+
98505+int __request_module(bool wait, const char *fmt, ...)
98506+{
98507+ va_list args;
98508+ int ret;
98509+
98510+#ifdef CONFIG_GRKERNSEC_MODHARDEN
98511+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
98512+ char module_param[MODULE_NAME_LEN];
98513+
98514+ memset(module_param, 0, sizeof(module_param));
98515+
98516+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
98517+
98518+ va_start(args, fmt);
98519+ ret = ____request_module(wait, module_param, fmt, args);
98520+ va_end(args);
98521+
98522+ return ret;
98523+ }
98524+#endif
98525+
98526+ va_start(args, fmt);
98527+ ret = ____request_module(wait, NULL, fmt, args);
98528+ va_end(args);
98529+
98530+ return ret;
98531+}
98532+
98533 EXPORT_SYMBOL(__request_module);
98534 #endif /* CONFIG_MODULES */
98535
98536 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
98537 {
98538+#ifdef CONFIG_GRKERNSEC
98539+ kfree(info->path);
98540+ info->path = info->origpath;
98541+#endif
98542 if (info->cleanup)
98543 (*info->cleanup)(info);
98544 kfree(info);
98545@@ -232,6 +289,21 @@ static int ____call_usermodehelper(void *data)
98546 */
98547 set_user_nice(current, 0);
98548
98549+#ifdef CONFIG_GRKERNSEC
98550+ /* this is race-free as far as userland is concerned as we copied
98551+ out the path to be used prior to this point and are now operating
98552+ on that copy
98553+ */
98554+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
98555+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
98556+ strncmp(sub_info->path, "/usr/libexec/", 13) && strncmp(sub_info->path, "/usr/bin/", 9) &&
98557+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
98558+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of permitted system paths\n", sub_info->path);
98559+ retval = -EPERM;
98560+ goto out;
98561+ }
98562+#endif
98563+
98564 retval = -ENOMEM;
98565 new = prepare_kernel_cred(current);
98566 if (!new)
98567@@ -254,8 +326,8 @@ static int ____call_usermodehelper(void *data)
98568 commit_creds(new);
98569
98570 retval = do_execve(getname_kernel(sub_info->path),
98571- (const char __user *const __user *)sub_info->argv,
98572- (const char __user *const __user *)sub_info->envp);
98573+ (const char __user *const __force_user *)sub_info->argv,
98574+ (const char __user *const __force_user *)sub_info->envp);
98575 out:
98576 sub_info->retval = retval;
98577 /* wait_for_helper() will call umh_complete if UHM_WAIT_PROC. */
98578@@ -288,7 +360,7 @@ static int wait_for_helper(void *data)
98579 *
98580 * Thus the __user pointer cast is valid here.
98581 */
98582- sys_wait4(pid, (int __user *)&ret, 0, NULL);
98583+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
98584
98585 /*
98586 * If ret is 0, either ____call_usermodehelper failed and the
98587@@ -510,7 +582,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
98588 goto out;
98589
98590 INIT_WORK(&sub_info->work, __call_usermodehelper);
98591+#ifdef CONFIG_GRKERNSEC
98592+ sub_info->origpath = path;
98593+ sub_info->path = kstrdup(path, gfp_mask);
98594+#else
98595 sub_info->path = path;
98596+#endif
98597 sub_info->argv = argv;
98598 sub_info->envp = envp;
98599
98600@@ -612,7 +689,7 @@ EXPORT_SYMBOL(call_usermodehelper);
98601 static int proc_cap_handler(struct ctl_table *table, int write,
98602 void __user *buffer, size_t *lenp, loff_t *ppos)
98603 {
98604- struct ctl_table t;
98605+ ctl_table_no_const t;
98606 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
98607 kernel_cap_t new_cap;
98608 int err, i;
98609diff --git a/kernel/kprobes.c b/kernel/kprobes.c
98610index c90e417..e6c515d 100644
98611--- a/kernel/kprobes.c
98612+++ b/kernel/kprobes.c
98613@@ -31,6 +31,9 @@
98614 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
98615 * <prasanna@in.ibm.com> added function-return probes.
98616 */
98617+#ifdef CONFIG_GRKERNSEC_HIDESYM
98618+#define __INCLUDED_BY_HIDESYM 1
98619+#endif
98620 #include <linux/kprobes.h>
98621 #include <linux/hash.h>
98622 #include <linux/init.h>
98623@@ -122,12 +125,12 @@ enum kprobe_slot_state {
98624
98625 static void *alloc_insn_page(void)
98626 {
98627- return module_alloc(PAGE_SIZE);
98628+ return module_alloc_exec(PAGE_SIZE);
98629 }
98630
98631 static void free_insn_page(void *page)
98632 {
98633- module_memfree(page);
98634+ module_memfree_exec(page);
98635 }
98636
98637 struct kprobe_insn_cache kprobe_insn_slots = {
98638@@ -2198,11 +2201,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
98639 kprobe_type = "k";
98640
98641 if (sym)
98642- seq_printf(pi, "%p %s %s+0x%x %s ",
98643+ seq_printf(pi, "%pK %s %s+0x%x %s ",
98644 p->addr, kprobe_type, sym, offset,
98645 (modname ? modname : " "));
98646 else
98647- seq_printf(pi, "%p %s %p ",
98648+ seq_printf(pi, "%pK %s %pK ",
98649 p->addr, kprobe_type, p->addr);
98650
98651 if (!pp)
98652diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
98653index 6683cce..daf8999 100644
98654--- a/kernel/ksysfs.c
98655+++ b/kernel/ksysfs.c
98656@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
98657 {
98658 if (count+1 > UEVENT_HELPER_PATH_LEN)
98659 return -ENOENT;
98660+ if (!capable(CAP_SYS_ADMIN))
98661+ return -EPERM;
98662 memcpy(uevent_helper, buf, count);
98663 uevent_helper[count] = '\0';
98664 if (count && uevent_helper[count-1] == '\n')
98665@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
98666 return count;
98667 }
98668
98669-static struct bin_attribute notes_attr = {
98670+static bin_attribute_no_const notes_attr __read_only = {
98671 .attr = {
98672 .name = "notes",
98673 .mode = S_IRUGO,
98674diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
98675index ba77ab5..d6a3e20 100644
98676--- a/kernel/locking/lockdep.c
98677+++ b/kernel/locking/lockdep.c
98678@@ -599,6 +599,10 @@ static int static_obj(void *obj)
98679 end = (unsigned long) &_end,
98680 addr = (unsigned long) obj;
98681
98682+#ifdef CONFIG_PAX_KERNEXEC
98683+ start = ktla_ktva(start);
98684+#endif
98685+
98686 /*
98687 * static variable?
98688 */
98689@@ -743,6 +747,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
98690 if (!static_obj(lock->key)) {
98691 debug_locks_off();
98692 printk("INFO: trying to register non-static key.\n");
98693+ printk("lock:%pS key:%pS.\n", lock, lock->key);
98694 printk("the code is fine but needs lockdep annotation.\n");
98695 printk("turning off the locking correctness validator.\n");
98696 dump_stack();
98697@@ -3088,7 +3093,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
98698 if (!class)
98699 return 0;
98700 }
98701- atomic_inc((atomic_t *)&class->ops);
98702+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
98703 if (very_verbose(class)) {
98704 printk("\nacquire class [%p] %s", class->key, class->name);
98705 if (class->name_version > 1)
98706diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
98707index ef43ac4..2720dfa 100644
98708--- a/kernel/locking/lockdep_proc.c
98709+++ b/kernel/locking/lockdep_proc.c
98710@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
98711 return 0;
98712 }
98713
98714- seq_printf(m, "%p", class->key);
98715+ seq_printf(m, "%pK", class->key);
98716 #ifdef CONFIG_DEBUG_LOCKDEP
98717 seq_printf(m, " OPS:%8ld", class->ops);
98718 #endif
98719@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
98720
98721 list_for_each_entry(entry, &class->locks_after, entry) {
98722 if (entry->distance == 1) {
98723- seq_printf(m, " -> [%p] ", entry->class->key);
98724+ seq_printf(m, " -> [%pK] ", entry->class->key);
98725 print_name(m, entry->class);
98726 seq_puts(m, "\n");
98727 }
98728@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
98729 if (!class->key)
98730 continue;
98731
98732- seq_printf(m, "[%p] ", class->key);
98733+ seq_printf(m, "[%pK] ", class->key);
98734 print_name(m, class);
98735 seq_puts(m, "\n");
98736 }
98737@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
98738 if (!i)
98739 seq_line(m, '-', 40-namelen, namelen);
98740
98741- snprintf(ip, sizeof(ip), "[<%p>]",
98742+ snprintf(ip, sizeof(ip), "[<%pK>]",
98743 (void *)class->contention_point[i]);
98744 seq_printf(m, "%40s %14lu %29s %pS\n",
98745 name, stats->contention_point[i],
98746@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
98747 if (!i)
98748 seq_line(m, '-', 40-namelen, namelen);
98749
98750- snprintf(ip, sizeof(ip), "[<%p>]",
98751+ snprintf(ip, sizeof(ip), "[<%pK>]",
98752 (void *)class->contending_point[i]);
98753 seq_printf(m, "%40s %14lu %29s %pS\n",
98754 name, stats->contending_point[i],
98755diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
98756index d1fe2ba..180cd65e 100644
98757--- a/kernel/locking/mcs_spinlock.h
98758+++ b/kernel/locking/mcs_spinlock.h
98759@@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
98760 */
98761 return;
98762 }
98763- ACCESS_ONCE(prev->next) = node;
98764+ ACCESS_ONCE_RW(prev->next) = node;
98765
98766 /* Wait until the lock holder passes the lock down. */
98767 arch_mcs_spin_lock_contended(&node->locked);
98768diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
98769index 3ef3736..9c951fa 100644
98770--- a/kernel/locking/mutex-debug.c
98771+++ b/kernel/locking/mutex-debug.c
98772@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
98773 }
98774
98775 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
98776- struct thread_info *ti)
98777+ struct task_struct *task)
98778 {
98779 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
98780
98781 /* Mark the current thread as blocked on the lock: */
98782- ti->task->blocked_on = waiter;
98783+ task->blocked_on = waiter;
98784 }
98785
98786 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
98787- struct thread_info *ti)
98788+ struct task_struct *task)
98789 {
98790 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
98791- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
98792- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
98793- ti->task->blocked_on = NULL;
98794+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
98795+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
98796+ task->blocked_on = NULL;
98797
98798 list_del_init(&waiter->list);
98799 waiter->task = NULL;
98800diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
98801index 0799fd3..d06ae3b 100644
98802--- a/kernel/locking/mutex-debug.h
98803+++ b/kernel/locking/mutex-debug.h
98804@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
98805 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
98806 extern void debug_mutex_add_waiter(struct mutex *lock,
98807 struct mutex_waiter *waiter,
98808- struct thread_info *ti);
98809+ struct task_struct *task);
98810 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
98811- struct thread_info *ti);
98812+ struct task_struct *task);
98813 extern void debug_mutex_unlock(struct mutex *lock);
98814 extern void debug_mutex_init(struct mutex *lock, const char *name,
98815 struct lock_class_key *key);
98816diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
98817index 94674e5..de4966f 100644
98818--- a/kernel/locking/mutex.c
98819+++ b/kernel/locking/mutex.c
98820@@ -542,7 +542,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
98821 goto skip_wait;
98822
98823 debug_mutex_lock_common(lock, &waiter);
98824- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
98825+ debug_mutex_add_waiter(lock, &waiter, task);
98826
98827 /* add waiting tasks to the end of the waitqueue (FIFO): */
98828 list_add_tail(&waiter.list, &lock->wait_list);
98829@@ -589,7 +589,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
98830 }
98831 __set_task_state(task, TASK_RUNNING);
98832
98833- mutex_remove_waiter(lock, &waiter, current_thread_info());
98834+ mutex_remove_waiter(lock, &waiter, task);
98835 /* set it to 0 if there are no waiters left: */
98836 if (likely(list_empty(&lock->wait_list)))
98837 atomic_set(&lock->count, 0);
98838@@ -610,7 +610,7 @@ skip_wait:
98839 return 0;
98840
98841 err:
98842- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
98843+ mutex_remove_waiter(lock, &waiter, task);
98844 spin_unlock_mutex(&lock->wait_lock, flags);
98845 debug_mutex_free_waiter(&waiter);
98846 mutex_release(&lock->dep_map, 1, ip);
98847diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
98848index c112d00..1946ad9 100644
98849--- a/kernel/locking/osq_lock.c
98850+++ b/kernel/locking/osq_lock.c
98851@@ -98,7 +98,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
98852
98853 prev = decode_cpu(old);
98854 node->prev = prev;
98855- ACCESS_ONCE(prev->next) = node;
98856+ ACCESS_ONCE_RW(prev->next) = node;
98857
98858 /*
98859 * Normally @prev is untouchable after the above store; because at that
98860@@ -170,8 +170,8 @@ unqueue:
98861 * it will wait in Step-A.
98862 */
98863
98864- ACCESS_ONCE(next->prev) = prev;
98865- ACCESS_ONCE(prev->next) = next;
98866+ ACCESS_ONCE_RW(next->prev) = prev;
98867+ ACCESS_ONCE_RW(prev->next) = next;
98868
98869 return false;
98870 }
98871@@ -193,11 +193,11 @@ void osq_unlock(struct optimistic_spin_queue *lock)
98872 node = this_cpu_ptr(&osq_node);
98873 next = xchg(&node->next, NULL);
98874 if (next) {
98875- ACCESS_ONCE(next->locked) = 1;
98876+ ACCESS_ONCE_RW(next->locked) = 1;
98877 return;
98878 }
98879
98880 next = osq_wait_next(lock, node, NULL);
98881 if (next)
98882- ACCESS_ONCE(next->locked) = 1;
98883+ ACCESS_ONCE_RW(next->locked) = 1;
98884 }
98885diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
98886index 1d96dd0..994ff19 100644
98887--- a/kernel/locking/rtmutex-tester.c
98888+++ b/kernel/locking/rtmutex-tester.c
98889@@ -22,7 +22,7 @@
98890 #define MAX_RT_TEST_MUTEXES 8
98891
98892 static spinlock_t rttest_lock;
98893-static atomic_t rttest_event;
98894+static atomic_unchecked_t rttest_event;
98895
98896 struct test_thread_data {
98897 int opcode;
98898@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
98899
98900 case RTTEST_LOCKCONT:
98901 td->mutexes[td->opdata] = 1;
98902- td->event = atomic_add_return(1, &rttest_event);
98903+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98904 return 0;
98905
98906 case RTTEST_RESET:
98907@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
98908 return 0;
98909
98910 case RTTEST_RESETEVENT:
98911- atomic_set(&rttest_event, 0);
98912+ atomic_set_unchecked(&rttest_event, 0);
98913 return 0;
98914
98915 default:
98916@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
98917 return ret;
98918
98919 td->mutexes[id] = 1;
98920- td->event = atomic_add_return(1, &rttest_event);
98921+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98922 rt_mutex_lock(&mutexes[id]);
98923- td->event = atomic_add_return(1, &rttest_event);
98924+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98925 td->mutexes[id] = 4;
98926 return 0;
98927
98928@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
98929 return ret;
98930
98931 td->mutexes[id] = 1;
98932- td->event = atomic_add_return(1, &rttest_event);
98933+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98934 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
98935- td->event = atomic_add_return(1, &rttest_event);
98936+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98937 td->mutexes[id] = ret ? 0 : 4;
98938 return ret ? -EINTR : 0;
98939
98940@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
98941 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
98942 return ret;
98943
98944- td->event = atomic_add_return(1, &rttest_event);
98945+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98946 rt_mutex_unlock(&mutexes[id]);
98947- td->event = atomic_add_return(1, &rttest_event);
98948+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98949 td->mutexes[id] = 0;
98950 return 0;
98951
98952@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
98953 break;
98954
98955 td->mutexes[dat] = 2;
98956- td->event = atomic_add_return(1, &rttest_event);
98957+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98958 break;
98959
98960 default:
98961@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
98962 return;
98963
98964 td->mutexes[dat] = 3;
98965- td->event = atomic_add_return(1, &rttest_event);
98966+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98967 break;
98968
98969 case RTTEST_LOCKNOWAIT:
98970@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
98971 return;
98972
98973 td->mutexes[dat] = 1;
98974- td->event = atomic_add_return(1, &rttest_event);
98975+ td->event = atomic_add_return_unchecked(1, &rttest_event);
98976 return;
98977
98978 default:
98979diff --git a/kernel/module.c b/kernel/module.c
98980index 538794c..76d7957 100644
98981--- a/kernel/module.c
98982+++ b/kernel/module.c
98983@@ -59,6 +59,7 @@
98984 #include <linux/jump_label.h>
98985 #include <linux/pfn.h>
98986 #include <linux/bsearch.h>
98987+#include <linux/grsecurity.h>
98988 #include <uapi/linux/module.h>
98989 #include "module-internal.h"
98990
98991@@ -155,7 +156,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
98992
98993 /* Bounds of module allocation, for speeding __module_address.
98994 * Protected by module_mutex. */
98995-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
98996+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
98997+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
98998
98999 int register_module_notifier(struct notifier_block *nb)
99000 {
99001@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
99002 return true;
99003
99004 list_for_each_entry_rcu(mod, &modules, list) {
99005- struct symsearch arr[] = {
99006+ struct symsearch modarr[] = {
99007 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
99008 NOT_GPL_ONLY, false },
99009 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
99010@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
99011 if (mod->state == MODULE_STATE_UNFORMED)
99012 continue;
99013
99014- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
99015+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
99016 return true;
99017 }
99018 return false;
99019@@ -487,7 +489,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
99020 if (!pcpusec->sh_size)
99021 return 0;
99022
99023- if (align > PAGE_SIZE) {
99024+ if (align-1 >= PAGE_SIZE) {
99025 pr_warn("%s: per-cpu alignment %li > %li\n",
99026 mod->name, align, PAGE_SIZE);
99027 align = PAGE_SIZE;
99028@@ -1053,7 +1055,7 @@ struct module_attribute module_uevent =
99029 static ssize_t show_coresize(struct module_attribute *mattr,
99030 struct module_kobject *mk, char *buffer)
99031 {
99032- return sprintf(buffer, "%u\n", mk->mod->core_size);
99033+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
99034 }
99035
99036 static struct module_attribute modinfo_coresize =
99037@@ -1062,7 +1064,7 @@ static struct module_attribute modinfo_coresize =
99038 static ssize_t show_initsize(struct module_attribute *mattr,
99039 struct module_kobject *mk, char *buffer)
99040 {
99041- return sprintf(buffer, "%u\n", mk->mod->init_size);
99042+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
99043 }
99044
99045 static struct module_attribute modinfo_initsize =
99046@@ -1154,12 +1156,29 @@ static int check_version(Elf_Shdr *sechdrs,
99047 goto bad_version;
99048 }
99049
99050+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
99051+ /*
99052+ * avoid potentially printing jibberish on attempted load
99053+ * of a module randomized with a different seed
99054+ */
99055+ pr_warn("no symbol version for %s\n", symname);
99056+#else
99057 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
99058+#endif
99059 return 0;
99060
99061 bad_version:
99062+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
99063+ /*
99064+ * avoid potentially printing jibberish on attempted load
99065+ * of a module randomized with a different seed
99066+ */
99067+ pr_warn("attempted module disagrees about version of symbol %s\n",
99068+ symname);
99069+#else
99070 pr_warn("%s: disagrees about version of symbol %s\n",
99071 mod->name, symname);
99072+#endif
99073 return 0;
99074 }
99075
99076@@ -1281,7 +1300,7 @@ resolve_symbol_wait(struct module *mod,
99077 */
99078 #ifdef CONFIG_SYSFS
99079
99080-#ifdef CONFIG_KALLSYMS
99081+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
99082 static inline bool sect_empty(const Elf_Shdr *sect)
99083 {
99084 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
99085@@ -1419,7 +1438,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
99086 {
99087 unsigned int notes, loaded, i;
99088 struct module_notes_attrs *notes_attrs;
99089- struct bin_attribute *nattr;
99090+ bin_attribute_no_const *nattr;
99091
99092 /* failed to create section attributes, so can't create notes */
99093 if (!mod->sect_attrs)
99094@@ -1531,7 +1550,7 @@ static void del_usage_links(struct module *mod)
99095 static int module_add_modinfo_attrs(struct module *mod)
99096 {
99097 struct module_attribute *attr;
99098- struct module_attribute *temp_attr;
99099+ module_attribute_no_const *temp_attr;
99100 int error = 0;
99101 int i;
99102
99103@@ -1741,21 +1760,21 @@ static void set_section_ro_nx(void *base,
99104
99105 static void unset_module_core_ro_nx(struct module *mod)
99106 {
99107- set_page_attributes(mod->module_core + mod->core_text_size,
99108- mod->module_core + mod->core_size,
99109+ set_page_attributes(mod->module_core_rw,
99110+ mod->module_core_rw + mod->core_size_rw,
99111 set_memory_x);
99112- set_page_attributes(mod->module_core,
99113- mod->module_core + mod->core_ro_size,
99114+ set_page_attributes(mod->module_core_rx,
99115+ mod->module_core_rx + mod->core_size_rx,
99116 set_memory_rw);
99117 }
99118
99119 static void unset_module_init_ro_nx(struct module *mod)
99120 {
99121- set_page_attributes(mod->module_init + mod->init_text_size,
99122- mod->module_init + mod->init_size,
99123+ set_page_attributes(mod->module_init_rw,
99124+ mod->module_init_rw + mod->init_size_rw,
99125 set_memory_x);
99126- set_page_attributes(mod->module_init,
99127- mod->module_init + mod->init_ro_size,
99128+ set_page_attributes(mod->module_init_rx,
99129+ mod->module_init_rx + mod->init_size_rx,
99130 set_memory_rw);
99131 }
99132
99133@@ -1768,14 +1787,14 @@ void set_all_modules_text_rw(void)
99134 list_for_each_entry_rcu(mod, &modules, list) {
99135 if (mod->state == MODULE_STATE_UNFORMED)
99136 continue;
99137- if ((mod->module_core) && (mod->core_text_size)) {
99138- set_page_attributes(mod->module_core,
99139- mod->module_core + mod->core_text_size,
99140+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
99141+ set_page_attributes(mod->module_core_rx,
99142+ mod->module_core_rx + mod->core_size_rx,
99143 set_memory_rw);
99144 }
99145- if ((mod->module_init) && (mod->init_text_size)) {
99146- set_page_attributes(mod->module_init,
99147- mod->module_init + mod->init_text_size,
99148+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
99149+ set_page_attributes(mod->module_init_rx,
99150+ mod->module_init_rx + mod->init_size_rx,
99151 set_memory_rw);
99152 }
99153 }
99154@@ -1791,14 +1810,14 @@ void set_all_modules_text_ro(void)
99155 list_for_each_entry_rcu(mod, &modules, list) {
99156 if (mod->state == MODULE_STATE_UNFORMED)
99157 continue;
99158- if ((mod->module_core) && (mod->core_text_size)) {
99159- set_page_attributes(mod->module_core,
99160- mod->module_core + mod->core_text_size,
99161+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
99162+ set_page_attributes(mod->module_core_rx,
99163+ mod->module_core_rx + mod->core_size_rx,
99164 set_memory_ro);
99165 }
99166- if ((mod->module_init) && (mod->init_text_size)) {
99167- set_page_attributes(mod->module_init,
99168- mod->module_init + mod->init_text_size,
99169+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
99170+ set_page_attributes(mod->module_init_rx,
99171+ mod->module_init_rx + mod->init_size_rx,
99172 set_memory_ro);
99173 }
99174 }
99175@@ -1807,7 +1826,15 @@ void set_all_modules_text_ro(void)
99176 #else
99177 static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
99178 static void unset_module_core_ro_nx(struct module *mod) { }
99179-static void unset_module_init_ro_nx(struct module *mod) { }
99180+static void unset_module_init_ro_nx(struct module *mod)
99181+{
99182+
99183+#ifdef CONFIG_PAX_KERNEXEC
99184+ set_memory_nx((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
99185+ set_memory_rw((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
99186+#endif
99187+
99188+}
99189 #endif
99190
99191 void __weak module_memfree(void *module_region)
99192@@ -1861,16 +1888,19 @@ static void free_module(struct module *mod)
99193 /* This may be NULL, but that's OK */
99194 unset_module_init_ro_nx(mod);
99195 module_arch_freeing_init(mod);
99196- module_memfree(mod->module_init);
99197+ module_memfree(mod->module_init_rw);
99198+ module_memfree_exec(mod->module_init_rx);
99199 kfree(mod->args);
99200 percpu_modfree(mod);
99201
99202 /* Free lock-classes; relies on the preceding sync_rcu(). */
99203- lockdep_free_key_range(mod->module_core, mod->core_size);
99204+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
99205+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
99206
99207 /* Finally, free the core (containing the module structure) */
99208 unset_module_core_ro_nx(mod);
99209- module_memfree(mod->module_core);
99210+ module_memfree_exec(mod->module_core_rx);
99211+ module_memfree(mod->module_core_rw);
99212
99213 #ifdef CONFIG_MPU
99214 update_protections(current->mm);
99215@@ -1939,9 +1969,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
99216 int ret = 0;
99217 const struct kernel_symbol *ksym;
99218
99219+#ifdef CONFIG_GRKERNSEC_MODHARDEN
99220+ int is_fs_load = 0;
99221+ int register_filesystem_found = 0;
99222+ char *p;
99223+
99224+ p = strstr(mod->args, "grsec_modharden_fs");
99225+ if (p) {
99226+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
99227+ /* copy \0 as well */
99228+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
99229+ is_fs_load = 1;
99230+ }
99231+#endif
99232+
99233 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
99234 const char *name = info->strtab + sym[i].st_name;
99235
99236+#ifdef CONFIG_GRKERNSEC_MODHARDEN
99237+ /* it's a real shame this will never get ripped and copied
99238+ upstream! ;(
99239+ */
99240+ if (is_fs_load && !strcmp(name, "register_filesystem"))
99241+ register_filesystem_found = 1;
99242+#endif
99243+
99244 switch (sym[i].st_shndx) {
99245 case SHN_COMMON:
99246 /* Ignore common symbols */
99247@@ -1966,7 +2018,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
99248 ksym = resolve_symbol_wait(mod, info, name);
99249 /* Ok if resolved. */
99250 if (ksym && !IS_ERR(ksym)) {
99251+ pax_open_kernel();
99252 sym[i].st_value = ksym->value;
99253+ pax_close_kernel();
99254 break;
99255 }
99256
99257@@ -1985,11 +2039,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
99258 secbase = (unsigned long)mod_percpu(mod);
99259 else
99260 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
99261+ pax_open_kernel();
99262 sym[i].st_value += secbase;
99263+ pax_close_kernel();
99264 break;
99265 }
99266 }
99267
99268+#ifdef CONFIG_GRKERNSEC_MODHARDEN
99269+ if (is_fs_load && !register_filesystem_found) {
99270+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
99271+ ret = -EPERM;
99272+ }
99273+#endif
99274+
99275 return ret;
99276 }
99277
99278@@ -2073,22 +2136,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
99279 || s->sh_entsize != ~0UL
99280 || strstarts(sname, ".init"))
99281 continue;
99282- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
99283+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
99284+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
99285+ else
99286+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
99287 pr_debug("\t%s\n", sname);
99288 }
99289- switch (m) {
99290- case 0: /* executable */
99291- mod->core_size = debug_align(mod->core_size);
99292- mod->core_text_size = mod->core_size;
99293- break;
99294- case 1: /* RO: text and ro-data */
99295- mod->core_size = debug_align(mod->core_size);
99296- mod->core_ro_size = mod->core_size;
99297- break;
99298- case 3: /* whole core */
99299- mod->core_size = debug_align(mod->core_size);
99300- break;
99301- }
99302 }
99303
99304 pr_debug("Init section allocation order:\n");
99305@@ -2102,23 +2155,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
99306 || s->sh_entsize != ~0UL
99307 || !strstarts(sname, ".init"))
99308 continue;
99309- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
99310- | INIT_OFFSET_MASK);
99311+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
99312+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
99313+ else
99314+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
99315+ s->sh_entsize |= INIT_OFFSET_MASK;
99316 pr_debug("\t%s\n", sname);
99317 }
99318- switch (m) {
99319- case 0: /* executable */
99320- mod->init_size = debug_align(mod->init_size);
99321- mod->init_text_size = mod->init_size;
99322- break;
99323- case 1: /* RO: text and ro-data */
99324- mod->init_size = debug_align(mod->init_size);
99325- mod->init_ro_size = mod->init_size;
99326- break;
99327- case 3: /* whole init */
99328- mod->init_size = debug_align(mod->init_size);
99329- break;
99330- }
99331 }
99332 }
99333
99334@@ -2291,7 +2334,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
99335
99336 /* Put symbol section at end of init part of module. */
99337 symsect->sh_flags |= SHF_ALLOC;
99338- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
99339+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
99340 info->index.sym) | INIT_OFFSET_MASK;
99341 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
99342
99343@@ -2308,16 +2351,16 @@ static void layout_symtab(struct module *mod, struct load_info *info)
99344 }
99345
99346 /* Append room for core symbols at end of core part. */
99347- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
99348- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
99349- mod->core_size += strtab_size;
99350- mod->core_size = debug_align(mod->core_size);
99351+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
99352+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
99353+ mod->core_size_rx += strtab_size;
99354+ mod->core_size_rx = debug_align(mod->core_size_rx);
99355
99356 /* Put string table section at end of init part of module. */
99357 strsect->sh_flags |= SHF_ALLOC;
99358- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
99359+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
99360 info->index.str) | INIT_OFFSET_MASK;
99361- mod->init_size = debug_align(mod->init_size);
99362+ mod->init_size_rx = debug_align(mod->init_size_rx);
99363 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
99364 }
99365
99366@@ -2334,12 +2377,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
99367 /* Make sure we get permanent strtab: don't use info->strtab. */
99368 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
99369
99370+ pax_open_kernel();
99371+
99372 /* Set types up while we still have access to sections. */
99373 for (i = 0; i < mod->num_symtab; i++)
99374 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
99375
99376- mod->core_symtab = dst = mod->module_core + info->symoffs;
99377- mod->core_strtab = s = mod->module_core + info->stroffs;
99378+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
99379+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
99380 src = mod->symtab;
99381 for (ndst = i = 0; i < mod->num_symtab; i++) {
99382 if (i == 0 ||
99383@@ -2351,6 +2396,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
99384 }
99385 }
99386 mod->core_num_syms = ndst;
99387+
99388+ pax_close_kernel();
99389 }
99390 #else
99391 static inline void layout_symtab(struct module *mod, struct load_info *info)
99392@@ -2384,17 +2431,33 @@ void * __weak module_alloc(unsigned long size)
99393 return vmalloc_exec(size);
99394 }
99395
99396-static void *module_alloc_update_bounds(unsigned long size)
99397+static void *module_alloc_update_bounds_rw(unsigned long size)
99398 {
99399 void *ret = module_alloc(size);
99400
99401 if (ret) {
99402 mutex_lock(&module_mutex);
99403 /* Update module bounds. */
99404- if ((unsigned long)ret < module_addr_min)
99405- module_addr_min = (unsigned long)ret;
99406- if ((unsigned long)ret + size > module_addr_max)
99407- module_addr_max = (unsigned long)ret + size;
99408+ if ((unsigned long)ret < module_addr_min_rw)
99409+ module_addr_min_rw = (unsigned long)ret;
99410+ if ((unsigned long)ret + size > module_addr_max_rw)
99411+ module_addr_max_rw = (unsigned long)ret + size;
99412+ mutex_unlock(&module_mutex);
99413+ }
99414+ return ret;
99415+}
99416+
99417+static void *module_alloc_update_bounds_rx(unsigned long size)
99418+{
99419+ void *ret = module_alloc_exec(size);
99420+
99421+ if (ret) {
99422+ mutex_lock(&module_mutex);
99423+ /* Update module bounds. */
99424+ if ((unsigned long)ret < module_addr_min_rx)
99425+ module_addr_min_rx = (unsigned long)ret;
99426+ if ((unsigned long)ret + size > module_addr_max_rx)
99427+ module_addr_max_rx = (unsigned long)ret + size;
99428 mutex_unlock(&module_mutex);
99429 }
99430 return ret;
99431@@ -2665,7 +2728,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
99432 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
99433
99434 if (info->index.sym == 0) {
99435+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
99436+ /*
99437+ * avoid potentially printing jibberish on attempted load
99438+ * of a module randomized with a different seed
99439+ */
99440+ pr_warn("module has no symbols (stripped?)\n");
99441+#else
99442 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
99443+#endif
99444 return ERR_PTR(-ENOEXEC);
99445 }
99446
99447@@ -2681,8 +2752,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
99448 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
99449 {
99450 const char *modmagic = get_modinfo(info, "vermagic");
99451+ const char *license = get_modinfo(info, "license");
99452 int err;
99453
99454+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
99455+ if (!license || !license_is_gpl_compatible(license))
99456+ return -ENOEXEC;
99457+#endif
99458+
99459 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
99460 modmagic = NULL;
99461
99462@@ -2707,7 +2784,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
99463 }
99464
99465 /* Set up license info based on the info section */
99466- set_license(mod, get_modinfo(info, "license"));
99467+ set_license(mod, license);
99468
99469 return 0;
99470 }
99471@@ -2801,7 +2878,7 @@ static int move_module(struct module *mod, struct load_info *info)
99472 void *ptr;
99473
99474 /* Do the allocs. */
99475- ptr = module_alloc_update_bounds(mod->core_size);
99476+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
99477 /*
99478 * The pointer to this block is stored in the module structure
99479 * which is inside the block. Just mark it as not being a
99480@@ -2811,11 +2888,11 @@ static int move_module(struct module *mod, struct load_info *info)
99481 if (!ptr)
99482 return -ENOMEM;
99483
99484- memset(ptr, 0, mod->core_size);
99485- mod->module_core = ptr;
99486+ memset(ptr, 0, mod->core_size_rw);
99487+ mod->module_core_rw = ptr;
99488
99489- if (mod->init_size) {
99490- ptr = module_alloc_update_bounds(mod->init_size);
99491+ if (mod->init_size_rw) {
99492+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
99493 /*
99494 * The pointer to this block is stored in the module structure
99495 * which is inside the block. This block doesn't need to be
99496@@ -2824,13 +2901,45 @@ static int move_module(struct module *mod, struct load_info *info)
99497 */
99498 kmemleak_ignore(ptr);
99499 if (!ptr) {
99500- module_memfree(mod->module_core);
99501+ module_memfree(mod->module_core_rw);
99502 return -ENOMEM;
99503 }
99504- memset(ptr, 0, mod->init_size);
99505- mod->module_init = ptr;
99506+ memset(ptr, 0, mod->init_size_rw);
99507+ mod->module_init_rw = ptr;
99508 } else
99509- mod->module_init = NULL;
99510+ mod->module_init_rw = NULL;
99511+
99512+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
99513+ kmemleak_not_leak(ptr);
99514+ if (!ptr) {
99515+ if (mod->module_init_rw)
99516+ module_memfree(mod->module_init_rw);
99517+ module_memfree(mod->module_core_rw);
99518+ return -ENOMEM;
99519+ }
99520+
99521+ pax_open_kernel();
99522+ memset(ptr, 0, mod->core_size_rx);
99523+ pax_close_kernel();
99524+ mod->module_core_rx = ptr;
99525+
99526+ if (mod->init_size_rx) {
99527+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
99528+ kmemleak_ignore(ptr);
99529+ if (!ptr && mod->init_size_rx) {
99530+ module_memfree_exec(mod->module_core_rx);
99531+ if (mod->module_init_rw)
99532+ module_memfree(mod->module_init_rw);
99533+ module_memfree(mod->module_core_rw);
99534+ return -ENOMEM;
99535+ }
99536+
99537+ pax_open_kernel();
99538+ memset(ptr, 0, mod->init_size_rx);
99539+ pax_close_kernel();
99540+ mod->module_init_rx = ptr;
99541+ } else
99542+ mod->module_init_rx = NULL;
99543
99544 /* Transfer each section which specifies SHF_ALLOC */
99545 pr_debug("final section addresses:\n");
99546@@ -2841,16 +2950,45 @@ static int move_module(struct module *mod, struct load_info *info)
99547 if (!(shdr->sh_flags & SHF_ALLOC))
99548 continue;
99549
99550- if (shdr->sh_entsize & INIT_OFFSET_MASK)
99551- dest = mod->module_init
99552- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
99553- else
99554- dest = mod->module_core + shdr->sh_entsize;
99555+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
99556+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
99557+ dest = mod->module_init_rw
99558+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
99559+ else
99560+ dest = mod->module_init_rx
99561+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
99562+ } else {
99563+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
99564+ dest = mod->module_core_rw + shdr->sh_entsize;
99565+ else
99566+ dest = mod->module_core_rx + shdr->sh_entsize;
99567+ }
99568+
99569+ if (shdr->sh_type != SHT_NOBITS) {
99570+
99571+#ifdef CONFIG_PAX_KERNEXEC
99572+#ifdef CONFIG_X86_64
99573+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
99574+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
99575+#endif
99576+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
99577+ pax_open_kernel();
99578+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
99579+ pax_close_kernel();
99580+ } else
99581+#endif
99582
99583- if (shdr->sh_type != SHT_NOBITS)
99584 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
99585+ }
99586 /* Update sh_addr to point to copy in image. */
99587- shdr->sh_addr = (unsigned long)dest;
99588+
99589+#ifdef CONFIG_PAX_KERNEXEC
99590+ if (shdr->sh_flags & SHF_EXECINSTR)
99591+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
99592+ else
99593+#endif
99594+
99595+ shdr->sh_addr = (unsigned long)dest;
99596 pr_debug("\t0x%lx %s\n",
99597 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
99598 }
99599@@ -2907,12 +3045,12 @@ static void flush_module_icache(const struct module *mod)
99600 * Do it before processing of module parameters, so the module
99601 * can provide parameter accessor functions of its own.
99602 */
99603- if (mod->module_init)
99604- flush_icache_range((unsigned long)mod->module_init,
99605- (unsigned long)mod->module_init
99606- + mod->init_size);
99607- flush_icache_range((unsigned long)mod->module_core,
99608- (unsigned long)mod->module_core + mod->core_size);
99609+ if (mod->module_init_rx)
99610+ flush_icache_range((unsigned long)mod->module_init_rx,
99611+ (unsigned long)mod->module_init_rx
99612+ + mod->init_size_rx);
99613+ flush_icache_range((unsigned long)mod->module_core_rx,
99614+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
99615
99616 set_fs(old_fs);
99617 }
99618@@ -2970,8 +3108,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
99619 {
99620 percpu_modfree(mod);
99621 module_arch_freeing_init(mod);
99622- module_memfree(mod->module_init);
99623- module_memfree(mod->module_core);
99624+ module_memfree_exec(mod->module_init_rx);
99625+ module_memfree_exec(mod->module_core_rx);
99626+ module_memfree(mod->module_init_rw);
99627+ module_memfree(mod->module_core_rw);
99628 }
99629
99630 int __weak module_finalize(const Elf_Ehdr *hdr,
99631@@ -2984,7 +3124,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
99632 static int post_relocation(struct module *mod, const struct load_info *info)
99633 {
99634 /* Sort exception table now relocations are done. */
99635+ pax_open_kernel();
99636 sort_extable(mod->extable, mod->extable + mod->num_exentries);
99637+ pax_close_kernel();
99638
99639 /* Copy relocated percpu area over. */
99640 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
99641@@ -3032,13 +3174,15 @@ static void do_mod_ctors(struct module *mod)
99642 /* For freeing module_init on success, in case kallsyms traversing */
99643 struct mod_initfree {
99644 struct rcu_head rcu;
99645- void *module_init;
99646+ void *module_init_rw;
99647+ void *module_init_rx;
99648 };
99649
99650 static void do_free_init(struct rcu_head *head)
99651 {
99652 struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
99653- module_memfree(m->module_init);
99654+ module_memfree(m->module_init_rw);
99655+ module_memfree_exec(m->module_init_rx);
99656 kfree(m);
99657 }
99658
99659@@ -3058,7 +3202,8 @@ static noinline int do_init_module(struct module *mod)
99660 ret = -ENOMEM;
99661 goto fail;
99662 }
99663- freeinit->module_init = mod->module_init;
99664+ freeinit->module_init_rw = mod->module_init_rw;
99665+ freeinit->module_init_rx = mod->module_init_rx;
99666
99667 /*
99668 * We want to find out whether @mod uses async during init. Clear
99669@@ -3117,10 +3262,10 @@ static noinline int do_init_module(struct module *mod)
99670 #endif
99671 unset_module_init_ro_nx(mod);
99672 module_arch_freeing_init(mod);
99673- mod->module_init = NULL;
99674- mod->init_size = 0;
99675- mod->init_ro_size = 0;
99676- mod->init_text_size = 0;
99677+ mod->module_init_rw = NULL;
99678+ mod->module_init_rx = NULL;
99679+ mod->init_size_rw = 0;
99680+ mod->init_size_rx = 0;
99681 /*
99682 * We want to free module_init, but be aware that kallsyms may be
99683 * walking this with preempt disabled. In all the failure paths,
99684@@ -3208,16 +3353,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
99685 module_bug_finalize(info->hdr, info->sechdrs, mod);
99686
99687 /* Set RO and NX regions for core */
99688- set_section_ro_nx(mod->module_core,
99689- mod->core_text_size,
99690- mod->core_ro_size,
99691- mod->core_size);
99692+ set_section_ro_nx(mod->module_core_rx,
99693+ mod->core_size_rx,
99694+ mod->core_size_rx,
99695+ mod->core_size_rx);
99696
99697 /* Set RO and NX regions for init */
99698- set_section_ro_nx(mod->module_init,
99699- mod->init_text_size,
99700- mod->init_ro_size,
99701- mod->init_size);
99702+ set_section_ro_nx(mod->module_init_rx,
99703+ mod->init_size_rx,
99704+ mod->init_size_rx,
99705+ mod->init_size_rx);
99706
99707 /* Mark state as coming so strong_try_module_get() ignores us,
99708 * but kallsyms etc. can see us. */
99709@@ -3301,9 +3446,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
99710 if (err)
99711 goto free_unload;
99712
99713+ /* Now copy in args */
99714+ mod->args = strndup_user(uargs, ~0UL >> 1);
99715+ if (IS_ERR(mod->args)) {
99716+ err = PTR_ERR(mod->args);
99717+ goto free_unload;
99718+ }
99719+
99720 /* Set up MODINFO_ATTR fields */
99721 setup_modinfo(mod, info);
99722
99723+#ifdef CONFIG_GRKERNSEC_MODHARDEN
99724+ {
99725+ char *p, *p2;
99726+
99727+ if (strstr(mod->args, "grsec_modharden_netdev")) {
99728+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
99729+ err = -EPERM;
99730+ goto free_modinfo;
99731+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
99732+ p += sizeof("grsec_modharden_normal") - 1;
99733+ p2 = strstr(p, "_");
99734+ if (p2) {
99735+ *p2 = '\0';
99736+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
99737+ *p2 = '_';
99738+ }
99739+ err = -EPERM;
99740+ goto free_modinfo;
99741+ }
99742+ }
99743+#endif
99744+
99745 /* Fix up syms, so that st_value is a pointer to location. */
99746 err = simplify_symbols(mod, info);
99747 if (err < 0)
99748@@ -3319,13 +3493,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
99749
99750 flush_module_icache(mod);
99751
99752- /* Now copy in args */
99753- mod->args = strndup_user(uargs, ~0UL >> 1);
99754- if (IS_ERR(mod->args)) {
99755- err = PTR_ERR(mod->args);
99756- goto free_arch_cleanup;
99757- }
99758-
99759 dynamic_debug_setup(info->debug, info->num_debug);
99760
99761 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
99762@@ -3376,11 +3543,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
99763 ddebug_cleanup:
99764 dynamic_debug_remove(info->debug);
99765 synchronize_sched();
99766- kfree(mod->args);
99767- free_arch_cleanup:
99768 module_arch_cleanup(mod);
99769 free_modinfo:
99770 free_modinfo(mod);
99771+ kfree(mod->args);
99772 free_unload:
99773 module_unload_free(mod);
99774 unlink_mod:
99775@@ -3393,7 +3559,8 @@ static int load_module(struct load_info *info, const char __user *uargs,
99776 mutex_unlock(&module_mutex);
99777 free_module:
99778 /* Free lock-classes; relies on the preceding sync_rcu() */
99779- lockdep_free_key_range(mod->module_core, mod->core_size);
99780+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
99781+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
99782
99783 module_deallocate(mod, info);
99784 free_copy:
99785@@ -3470,10 +3637,16 @@ static const char *get_ksymbol(struct module *mod,
99786 unsigned long nextval;
99787
99788 /* At worse, next value is at end of module */
99789- if (within_module_init(addr, mod))
99790- nextval = (unsigned long)mod->module_init+mod->init_text_size;
99791+ if (within_module_init_rx(addr, mod))
99792+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
99793+ else if (within_module_init_rw(addr, mod))
99794+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
99795+ else if (within_module_core_rx(addr, mod))
99796+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
99797+ else if (within_module_core_rw(addr, mod))
99798+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
99799 else
99800- nextval = (unsigned long)mod->module_core+mod->core_text_size;
99801+ return NULL;
99802
99803 /* Scan for closest preceding symbol, and next symbol. (ELF
99804 starts real symbols at 1). */
99805@@ -3721,7 +3894,7 @@ static int m_show(struct seq_file *m, void *p)
99806 return 0;
99807
99808 seq_printf(m, "%s %u",
99809- mod->name, mod->init_size + mod->core_size);
99810+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
99811 print_unload_info(m, mod);
99812
99813 /* Informative for users. */
99814@@ -3730,7 +3903,7 @@ static int m_show(struct seq_file *m, void *p)
99815 mod->state == MODULE_STATE_COMING ? "Loading" :
99816 "Live");
99817 /* Used by oprofile and other similar tools. */
99818- seq_printf(m, " 0x%pK", mod->module_core);
99819+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
99820
99821 /* Taints info */
99822 if (mod->taints)
99823@@ -3766,7 +3939,17 @@ static const struct file_operations proc_modules_operations = {
99824
99825 static int __init proc_modules_init(void)
99826 {
99827+#ifndef CONFIG_GRKERNSEC_HIDESYM
99828+#ifdef CONFIG_GRKERNSEC_PROC_USER
99829+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
99830+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
99831+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
99832+#else
99833 proc_create("modules", 0, NULL, &proc_modules_operations);
99834+#endif
99835+#else
99836+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
99837+#endif
99838 return 0;
99839 }
99840 module_init(proc_modules_init);
99841@@ -3827,7 +4010,8 @@ struct module *__module_address(unsigned long addr)
99842 {
99843 struct module *mod;
99844
99845- if (addr < module_addr_min || addr > module_addr_max)
99846+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
99847+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
99848 return NULL;
99849
99850 list_for_each_entry_rcu(mod, &modules, list) {
99851@@ -3868,11 +4052,20 @@ bool is_module_text_address(unsigned long addr)
99852 */
99853 struct module *__module_text_address(unsigned long addr)
99854 {
99855- struct module *mod = __module_address(addr);
99856+ struct module *mod;
99857+
99858+#ifdef CONFIG_X86_32
99859+ addr = ktla_ktva(addr);
99860+#endif
99861+
99862+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
99863+ return NULL;
99864+
99865+ mod = __module_address(addr);
99866+
99867 if (mod) {
99868 /* Make sure it's within the text section. */
99869- if (!within(addr, mod->module_init, mod->init_text_size)
99870- && !within(addr, mod->module_core, mod->core_text_size))
99871+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
99872 mod = NULL;
99873 }
99874 return mod;
99875diff --git a/kernel/notifier.c b/kernel/notifier.c
99876index ae9fc7c..5085fbf 100644
99877--- a/kernel/notifier.c
99878+++ b/kernel/notifier.c
99879@@ -5,6 +5,7 @@
99880 #include <linux/rcupdate.h>
99881 #include <linux/vmalloc.h>
99882 #include <linux/reboot.h>
99883+#include <linux/mm.h>
99884
99885 /*
99886 * Notifier list for kernel code which wants to be called
99887@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
99888 while ((*nl) != NULL) {
99889 if (n->priority > (*nl)->priority)
99890 break;
99891- nl = &((*nl)->next);
99892+ nl = (struct notifier_block **)&((*nl)->next);
99893 }
99894- n->next = *nl;
99895+ pax_open_kernel();
99896+ *(const void **)&n->next = *nl;
99897 rcu_assign_pointer(*nl, n);
99898+ pax_close_kernel();
99899 return 0;
99900 }
99901
99902@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
99903 return 0;
99904 if (n->priority > (*nl)->priority)
99905 break;
99906- nl = &((*nl)->next);
99907+ nl = (struct notifier_block **)&((*nl)->next);
99908 }
99909- n->next = *nl;
99910+ pax_open_kernel();
99911+ *(const void **)&n->next = *nl;
99912 rcu_assign_pointer(*nl, n);
99913+ pax_close_kernel();
99914 return 0;
99915 }
99916
99917@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
99918 {
99919 while ((*nl) != NULL) {
99920 if ((*nl) == n) {
99921+ pax_open_kernel();
99922 rcu_assign_pointer(*nl, n->next);
99923+ pax_close_kernel();
99924 return 0;
99925 }
99926- nl = &((*nl)->next);
99927+ nl = (struct notifier_block **)&((*nl)->next);
99928 }
99929 return -ENOENT;
99930 }
99931diff --git a/kernel/padata.c b/kernel/padata.c
99932index b38bea9..91acfbe 100644
99933--- a/kernel/padata.c
99934+++ b/kernel/padata.c
99935@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
99936 * seq_nr mod. number of cpus in use.
99937 */
99938
99939- seq_nr = atomic_inc_return(&pd->seq_nr);
99940+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
99941 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
99942
99943 return padata_index_to_cpu(pd, cpu_index);
99944@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
99945 padata_init_pqueues(pd);
99946 padata_init_squeues(pd);
99947 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
99948- atomic_set(&pd->seq_nr, -1);
99949+ atomic_set_unchecked(&pd->seq_nr, -1);
99950 atomic_set(&pd->reorder_objects, 0);
99951 atomic_set(&pd->refcnt, 0);
99952 pd->pinst = pinst;
99953diff --git a/kernel/panic.c b/kernel/panic.c
99954index 8136ad7..15c857b 100644
99955--- a/kernel/panic.c
99956+++ b/kernel/panic.c
99957@@ -54,7 +54,7 @@ EXPORT_SYMBOL(panic_blink);
99958 /*
99959 * Stop ourself in panic -- architecture code may override this
99960 */
99961-void __weak panic_smp_self_stop(void)
99962+void __weak __noreturn panic_smp_self_stop(void)
99963 {
99964 while (1)
99965 cpu_relax();
99966@@ -425,7 +425,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
99967 disable_trace_on_warning();
99968
99969 pr_warn("------------[ cut here ]------------\n");
99970- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
99971+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
99972 raw_smp_processor_id(), current->pid, file, line, caller);
99973
99974 if (args)
99975@@ -490,7 +490,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
99976 */
99977 __visible void __stack_chk_fail(void)
99978 {
99979- panic("stack-protector: Kernel stack is corrupted in: %p\n",
99980+ dump_stack();
99981+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
99982 __builtin_return_address(0));
99983 }
99984 EXPORT_SYMBOL(__stack_chk_fail);
99985diff --git a/kernel/pid.c b/kernel/pid.c
99986index cd36a5e..11f185d 100644
99987--- a/kernel/pid.c
99988+++ b/kernel/pid.c
99989@@ -33,6 +33,7 @@
99990 #include <linux/rculist.h>
99991 #include <linux/bootmem.h>
99992 #include <linux/hash.h>
99993+#include <linux/security.h>
99994 #include <linux/pid_namespace.h>
99995 #include <linux/init_task.h>
99996 #include <linux/syscalls.h>
99997@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
99998
99999 int pid_max = PID_MAX_DEFAULT;
100000
100001-#define RESERVED_PIDS 300
100002+#define RESERVED_PIDS 500
100003
100004 int pid_max_min = RESERVED_PIDS + 1;
100005 int pid_max_max = PID_MAX_LIMIT;
100006@@ -450,10 +451,18 @@ EXPORT_SYMBOL(pid_task);
100007 */
100008 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
100009 {
100010+ struct task_struct *task;
100011+
100012 rcu_lockdep_assert(rcu_read_lock_held(),
100013 "find_task_by_pid_ns() needs rcu_read_lock()"
100014 " protection");
100015- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
100016+
100017+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
100018+
100019+ if (gr_pid_is_chrooted(task))
100020+ return NULL;
100021+
100022+ return task;
100023 }
100024
100025 struct task_struct *find_task_by_vpid(pid_t vnr)
100026@@ -461,6 +470,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
100027 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
100028 }
100029
100030+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
100031+{
100032+ rcu_lockdep_assert(rcu_read_lock_held(),
100033+ "find_task_by_pid_ns() needs rcu_read_lock()"
100034+ " protection");
100035+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
100036+}
100037+
100038 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
100039 {
100040 struct pid *pid;
100041diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
100042index a65ba13..f600dbb 100644
100043--- a/kernel/pid_namespace.c
100044+++ b/kernel/pid_namespace.c
100045@@ -274,7 +274,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
100046 void __user *buffer, size_t *lenp, loff_t *ppos)
100047 {
100048 struct pid_namespace *pid_ns = task_active_pid_ns(current);
100049- struct ctl_table tmp = *table;
100050+ ctl_table_no_const tmp = *table;
100051
100052 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
100053 return -EPERM;
100054diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
100055index 7e01f78..f5da19d 100644
100056--- a/kernel/power/Kconfig
100057+++ b/kernel/power/Kconfig
100058@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
100059 config HIBERNATION
100060 bool "Hibernation (aka 'suspend to disk')"
100061 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
100062+ depends on !GRKERNSEC_KMEM
100063+ depends on !PAX_MEMORY_SANITIZE
100064 select HIBERNATE_CALLBACKS
100065 select LZO_COMPRESS
100066 select LZO_DECOMPRESS
100067diff --git a/kernel/power/process.c b/kernel/power/process.c
100068index 564f786..361a18e 100644
100069--- a/kernel/power/process.c
100070+++ b/kernel/power/process.c
100071@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
100072 unsigned int elapsed_msecs;
100073 bool wakeup = false;
100074 int sleep_usecs = USEC_PER_MSEC;
100075+ bool timedout = false;
100076
100077 do_gettimeofday(&start);
100078
100079@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
100080
100081 while (true) {
100082 todo = 0;
100083+ if (time_after(jiffies, end_time))
100084+ timedout = true;
100085 read_lock(&tasklist_lock);
100086 for_each_process_thread(g, p) {
100087 if (p == current || !freeze_task(p))
100088 continue;
100089
100090- if (!freezer_should_skip(p))
100091+ if (!freezer_should_skip(p)) {
100092 todo++;
100093+ if (timedout) {
100094+ printk(KERN_ERR "Task refusing to freeze:\n");
100095+ sched_show_task(p);
100096+ }
100097+ }
100098 }
100099 read_unlock(&tasklist_lock);
100100
100101@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
100102 todo += wq_busy;
100103 }
100104
100105- if (!todo || time_after(jiffies, end_time))
100106+ if (!todo || timedout)
100107 break;
100108
100109 if (pm_wakeup_pending()) {
100110diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
100111index bb0635b..9aff9f3 100644
100112--- a/kernel/printk/printk.c
100113+++ b/kernel/printk/printk.c
100114@@ -486,6 +486,11 @@ int check_syslog_permissions(int type, bool from_file)
100115 if (from_file && type != SYSLOG_ACTION_OPEN)
100116 return 0;
100117
100118+#ifdef CONFIG_GRKERNSEC_DMESG
100119+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
100120+ return -EPERM;
100121+#endif
100122+
100123 if (syslog_action_restricted(type)) {
100124 if (capable(CAP_SYSLOG))
100125 return 0;
100126diff --git a/kernel/profile.c b/kernel/profile.c
100127index a7bcd28..5b368fa 100644
100128--- a/kernel/profile.c
100129+++ b/kernel/profile.c
100130@@ -37,7 +37,7 @@ struct profile_hit {
100131 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
100132 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
100133
100134-static atomic_t *prof_buffer;
100135+static atomic_unchecked_t *prof_buffer;
100136 static unsigned long prof_len, prof_shift;
100137
100138 int prof_on __read_mostly;
100139@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
100140 hits[i].pc = 0;
100141 continue;
100142 }
100143- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
100144+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
100145 hits[i].hits = hits[i].pc = 0;
100146 }
100147 }
100148@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
100149 * Add the current hit(s) and flush the write-queue out
100150 * to the global buffer:
100151 */
100152- atomic_add(nr_hits, &prof_buffer[pc]);
100153+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
100154 for (i = 0; i < NR_PROFILE_HIT; ++i) {
100155- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
100156+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
100157 hits[i].pc = hits[i].hits = 0;
100158 }
100159 out:
100160@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
100161 {
100162 unsigned long pc;
100163 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
100164- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
100165+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
100166 }
100167 #endif /* !CONFIG_SMP */
100168
100169@@ -489,7 +489,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
100170 return -EFAULT;
100171 buf++; p++; count--; read++;
100172 }
100173- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
100174+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
100175 if (copy_to_user(buf, (void *)pnt, count))
100176 return -EFAULT;
100177 read += count;
100178@@ -520,7 +520,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
100179 }
100180 #endif
100181 profile_discard_flip_buffers();
100182- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
100183+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
100184 return count;
100185 }
100186
100187diff --git a/kernel/ptrace.c b/kernel/ptrace.c
100188index 9a34bd8..38d90e5 100644
100189--- a/kernel/ptrace.c
100190+++ b/kernel/ptrace.c
100191@@ -321,7 +321,7 @@ static int ptrace_attach(struct task_struct *task, long request,
100192 if (seize)
100193 flags |= PT_SEIZED;
100194 rcu_read_lock();
100195- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
100196+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
100197 flags |= PT_PTRACE_CAP;
100198 rcu_read_unlock();
100199 task->ptrace = flags;
100200@@ -515,7 +515,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
100201 break;
100202 return -EIO;
100203 }
100204- if (copy_to_user(dst, buf, retval))
100205+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
100206 return -EFAULT;
100207 copied += retval;
100208 src += retval;
100209@@ -803,7 +803,7 @@ int ptrace_request(struct task_struct *child, long request,
100210 bool seized = child->ptrace & PT_SEIZED;
100211 int ret = -EIO;
100212 siginfo_t siginfo, *si;
100213- void __user *datavp = (void __user *) data;
100214+ void __user *datavp = (__force void __user *) data;
100215 unsigned long __user *datalp = datavp;
100216 unsigned long flags;
100217
100218@@ -1049,14 +1049,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
100219 goto out;
100220 }
100221
100222+ if (gr_handle_ptrace(child, request)) {
100223+ ret = -EPERM;
100224+ goto out_put_task_struct;
100225+ }
100226+
100227 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
100228 ret = ptrace_attach(child, request, addr, data);
100229 /*
100230 * Some architectures need to do book-keeping after
100231 * a ptrace attach.
100232 */
100233- if (!ret)
100234+ if (!ret) {
100235 arch_ptrace_attach(child);
100236+ gr_audit_ptrace(child);
100237+ }
100238 goto out_put_task_struct;
100239 }
100240
100241@@ -1084,7 +1091,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
100242 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
100243 if (copied != sizeof(tmp))
100244 return -EIO;
100245- return put_user(tmp, (unsigned long __user *)data);
100246+ return put_user(tmp, (__force unsigned long __user *)data);
100247 }
100248
100249 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
100250@@ -1177,7 +1184,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
100251 }
100252
100253 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
100254- compat_long_t, addr, compat_long_t, data)
100255+ compat_ulong_t, addr, compat_ulong_t, data)
100256 {
100257 struct task_struct *child;
100258 long ret;
100259@@ -1193,14 +1200,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
100260 goto out;
100261 }
100262
100263+ if (gr_handle_ptrace(child, request)) {
100264+ ret = -EPERM;
100265+ goto out_put_task_struct;
100266+ }
100267+
100268 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
100269 ret = ptrace_attach(child, request, addr, data);
100270 /*
100271 * Some architectures need to do book-keeping after
100272 * a ptrace attach.
100273 */
100274- if (!ret)
100275+ if (!ret) {
100276 arch_ptrace_attach(child);
100277+ gr_audit_ptrace(child);
100278+ }
100279 goto out_put_task_struct;
100280 }
100281
100282diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
100283index 30d42aa..cac5d66 100644
100284--- a/kernel/rcu/rcutorture.c
100285+++ b/kernel/rcu/rcutorture.c
100286@@ -134,12 +134,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
100287 rcu_torture_count) = { 0 };
100288 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
100289 rcu_torture_batch) = { 0 };
100290-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
100291-static atomic_t n_rcu_torture_alloc;
100292-static atomic_t n_rcu_torture_alloc_fail;
100293-static atomic_t n_rcu_torture_free;
100294-static atomic_t n_rcu_torture_mberror;
100295-static atomic_t n_rcu_torture_error;
100296+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
100297+static atomic_unchecked_t n_rcu_torture_alloc;
100298+static atomic_unchecked_t n_rcu_torture_alloc_fail;
100299+static atomic_unchecked_t n_rcu_torture_free;
100300+static atomic_unchecked_t n_rcu_torture_mberror;
100301+static atomic_unchecked_t n_rcu_torture_error;
100302 static long n_rcu_torture_barrier_error;
100303 static long n_rcu_torture_boost_ktrerror;
100304 static long n_rcu_torture_boost_rterror;
100305@@ -148,7 +148,7 @@ static long n_rcu_torture_boosts;
100306 static long n_rcu_torture_timers;
100307 static long n_barrier_attempts;
100308 static long n_barrier_successes;
100309-static atomic_long_t n_cbfloods;
100310+static atomic_long_unchecked_t n_cbfloods;
100311 static struct list_head rcu_torture_removed;
100312
100313 static int rcu_torture_writer_state;
100314@@ -211,11 +211,11 @@ rcu_torture_alloc(void)
100315
100316 spin_lock_bh(&rcu_torture_lock);
100317 if (list_empty(&rcu_torture_freelist)) {
100318- atomic_inc(&n_rcu_torture_alloc_fail);
100319+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
100320 spin_unlock_bh(&rcu_torture_lock);
100321 return NULL;
100322 }
100323- atomic_inc(&n_rcu_torture_alloc);
100324+ atomic_inc_unchecked(&n_rcu_torture_alloc);
100325 p = rcu_torture_freelist.next;
100326 list_del_init(p);
100327 spin_unlock_bh(&rcu_torture_lock);
100328@@ -228,7 +228,7 @@ rcu_torture_alloc(void)
100329 static void
100330 rcu_torture_free(struct rcu_torture *p)
100331 {
100332- atomic_inc(&n_rcu_torture_free);
100333+ atomic_inc_unchecked(&n_rcu_torture_free);
100334 spin_lock_bh(&rcu_torture_lock);
100335 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
100336 spin_unlock_bh(&rcu_torture_lock);
100337@@ -308,7 +308,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
100338 i = rp->rtort_pipe_count;
100339 if (i > RCU_TORTURE_PIPE_LEN)
100340 i = RCU_TORTURE_PIPE_LEN;
100341- atomic_inc(&rcu_torture_wcount[i]);
100342+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
100343 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
100344 rp->rtort_mbtest = 0;
100345 return true;
100346@@ -796,7 +796,7 @@ rcu_torture_cbflood(void *arg)
100347 VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
100348 do {
100349 schedule_timeout_interruptible(cbflood_inter_holdoff);
100350- atomic_long_inc(&n_cbfloods);
100351+ atomic_long_inc_unchecked(&n_cbfloods);
100352 WARN_ON(signal_pending(current));
100353 for (i = 0; i < cbflood_n_burst; i++) {
100354 for (j = 0; j < cbflood_n_per_burst; j++) {
100355@@ -915,7 +915,7 @@ rcu_torture_writer(void *arg)
100356 i = old_rp->rtort_pipe_count;
100357 if (i > RCU_TORTURE_PIPE_LEN)
100358 i = RCU_TORTURE_PIPE_LEN;
100359- atomic_inc(&rcu_torture_wcount[i]);
100360+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
100361 old_rp->rtort_pipe_count++;
100362 switch (synctype[torture_random(&rand) % nsynctypes]) {
100363 case RTWS_DEF_FREE:
100364@@ -1036,7 +1036,7 @@ static void rcu_torture_timer(unsigned long unused)
100365 return;
100366 }
100367 if (p->rtort_mbtest == 0)
100368- atomic_inc(&n_rcu_torture_mberror);
100369+ atomic_inc_unchecked(&n_rcu_torture_mberror);
100370 spin_lock(&rand_lock);
100371 cur_ops->read_delay(&rand);
100372 n_rcu_torture_timers++;
100373@@ -1111,7 +1111,7 @@ rcu_torture_reader(void *arg)
100374 continue;
100375 }
100376 if (p->rtort_mbtest == 0)
100377- atomic_inc(&n_rcu_torture_mberror);
100378+ atomic_inc_unchecked(&n_rcu_torture_mberror);
100379 cur_ops->read_delay(&rand);
100380 preempt_disable();
100381 pipe_count = p->rtort_pipe_count;
100382@@ -1180,11 +1180,11 @@ rcu_torture_stats_print(void)
100383 rcu_torture_current,
100384 rcu_torture_current_version,
100385 list_empty(&rcu_torture_freelist),
100386- atomic_read(&n_rcu_torture_alloc),
100387- atomic_read(&n_rcu_torture_alloc_fail),
100388- atomic_read(&n_rcu_torture_free));
100389+ atomic_read_unchecked(&n_rcu_torture_alloc),
100390+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
100391+ atomic_read_unchecked(&n_rcu_torture_free));
100392 pr_cont("rtmbe: %d rtbke: %ld rtbre: %ld ",
100393- atomic_read(&n_rcu_torture_mberror),
100394+ atomic_read_unchecked(&n_rcu_torture_mberror),
100395 n_rcu_torture_boost_ktrerror,
100396 n_rcu_torture_boost_rterror);
100397 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
100398@@ -1196,17 +1196,17 @@ rcu_torture_stats_print(void)
100399 n_barrier_successes,
100400 n_barrier_attempts,
100401 n_rcu_torture_barrier_error);
100402- pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
100403+ pr_cont("cbflood: %ld\n", atomic_long_read_unchecked(&n_cbfloods));
100404
100405 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
100406- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
100407+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
100408 n_rcu_torture_barrier_error != 0 ||
100409 n_rcu_torture_boost_ktrerror != 0 ||
100410 n_rcu_torture_boost_rterror != 0 ||
100411 n_rcu_torture_boost_failure != 0 ||
100412 i > 1) {
100413 pr_cont("%s", "!!! ");
100414- atomic_inc(&n_rcu_torture_error);
100415+ atomic_inc_unchecked(&n_rcu_torture_error);
100416 WARN_ON_ONCE(1);
100417 }
100418 pr_cont("Reader Pipe: ");
100419@@ -1223,7 +1223,7 @@ rcu_torture_stats_print(void)
100420 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
100421 pr_cont("Free-Block Circulation: ");
100422 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
100423- pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
100424+ pr_cont(" %d", atomic_read_unchecked(&rcu_torture_wcount[i]));
100425 }
100426 pr_cont("\n");
100427
100428@@ -1570,7 +1570,7 @@ rcu_torture_cleanup(void)
100429
100430 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
100431
100432- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
100433+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
100434 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
100435 else if (torture_onoff_failures())
100436 rcu_torture_print_module_parms(cur_ops,
100437@@ -1695,18 +1695,18 @@ rcu_torture_init(void)
100438
100439 rcu_torture_current = NULL;
100440 rcu_torture_current_version = 0;
100441- atomic_set(&n_rcu_torture_alloc, 0);
100442- atomic_set(&n_rcu_torture_alloc_fail, 0);
100443- atomic_set(&n_rcu_torture_free, 0);
100444- atomic_set(&n_rcu_torture_mberror, 0);
100445- atomic_set(&n_rcu_torture_error, 0);
100446+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
100447+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
100448+ atomic_set_unchecked(&n_rcu_torture_free, 0);
100449+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
100450+ atomic_set_unchecked(&n_rcu_torture_error, 0);
100451 n_rcu_torture_barrier_error = 0;
100452 n_rcu_torture_boost_ktrerror = 0;
100453 n_rcu_torture_boost_rterror = 0;
100454 n_rcu_torture_boost_failure = 0;
100455 n_rcu_torture_boosts = 0;
100456 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
100457- atomic_set(&rcu_torture_wcount[i], 0);
100458+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
100459 for_each_possible_cpu(cpu) {
100460 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
100461 per_cpu(rcu_torture_count, cpu)[i] = 0;
100462diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
100463index cc9ceca..ce075a6 100644
100464--- a/kernel/rcu/tiny.c
100465+++ b/kernel/rcu/tiny.c
100466@@ -42,7 +42,7 @@
100467 /* Forward declarations for tiny_plugin.h. */
100468 struct rcu_ctrlblk;
100469 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
100470-static void rcu_process_callbacks(struct softirq_action *unused);
100471+static void rcu_process_callbacks(void);
100472 static void __call_rcu(struct rcu_head *head,
100473 void (*func)(struct rcu_head *rcu),
100474 struct rcu_ctrlblk *rcp);
100475@@ -210,7 +210,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
100476 false));
100477 }
100478
100479-static void rcu_process_callbacks(struct softirq_action *unused)
100480+static __latent_entropy void rcu_process_callbacks(void)
100481 {
100482 __rcu_process_callbacks(&rcu_sched_ctrlblk);
100483 __rcu_process_callbacks(&rcu_bh_ctrlblk);
100484diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
100485index f94e209..d2985bd 100644
100486--- a/kernel/rcu/tiny_plugin.h
100487+++ b/kernel/rcu/tiny_plugin.h
100488@@ -150,10 +150,10 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
100489 rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
100490 jiffies - rcp->gp_start, rcp->qlen);
100491 dump_stack();
100492- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
100493+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
100494 3 * rcu_jiffies_till_stall_check() + 3;
100495 } else if (ULONG_CMP_GE(j, js)) {
100496- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
100497+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
100498 }
100499 }
100500
100501@@ -161,7 +161,7 @@ static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
100502 {
100503 rcp->ticks_this_gp = 0;
100504 rcp->gp_start = jiffies;
100505- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
100506+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
100507 }
100508
100509 static void check_cpu_stalls(void)
100510diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
100511index 48d640c..9401d30 100644
100512--- a/kernel/rcu/tree.c
100513+++ b/kernel/rcu/tree.c
100514@@ -268,7 +268,7 @@ static void rcu_momentary_dyntick_idle(void)
100515 */
100516 rdtp = this_cpu_ptr(&rcu_dynticks);
100517 smp_mb__before_atomic(); /* Earlier stuff before QS. */
100518- atomic_add(2, &rdtp->dynticks); /* QS. */
100519+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
100520 smp_mb__after_atomic(); /* Later stuff after QS. */
100521 break;
100522 }
100523@@ -580,9 +580,9 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
100524 rcu_prepare_for_idle();
100525 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
100526 smp_mb__before_atomic(); /* See above. */
100527- atomic_inc(&rdtp->dynticks);
100528+ atomic_inc_unchecked(&rdtp->dynticks);
100529 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
100530- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
100531+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
100532 rcu_dynticks_task_enter();
100533
100534 /*
100535@@ -703,10 +703,10 @@ static void rcu_eqs_exit_common(long long oldval, int user)
100536
100537 rcu_dynticks_task_exit();
100538 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
100539- atomic_inc(&rdtp->dynticks);
100540+ atomic_inc_unchecked(&rdtp->dynticks);
100541 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
100542 smp_mb__after_atomic(); /* See above. */
100543- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
100544+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
100545 rcu_cleanup_after_idle();
100546 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
100547 if (!user && !is_idle_task(current)) {
100548@@ -840,12 +840,12 @@ void rcu_nmi_enter(void)
100549 * to be in the outermost NMI handler that interrupted an RCU-idle
100550 * period (observation due to Andy Lutomirski).
100551 */
100552- if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
100553+ if (!(atomic_read_unchecked(&rdtp->dynticks) & 0x1)) {
100554 smp_mb__before_atomic(); /* Force delay from prior write. */
100555- atomic_inc(&rdtp->dynticks);
100556+ atomic_inc_unchecked(&rdtp->dynticks);
100557 /* atomic_inc() before later RCU read-side crit sects */
100558 smp_mb__after_atomic(); /* See above. */
100559- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
100560+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
100561 incby = 1;
100562 }
100563 rdtp->dynticks_nmi_nesting += incby;
100564@@ -870,7 +870,7 @@ void rcu_nmi_exit(void)
100565 * to us!)
100566 */
100567 WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
100568- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
100569+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
100570
100571 /*
100572 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
100573@@ -885,9 +885,9 @@ void rcu_nmi_exit(void)
100574 rdtp->dynticks_nmi_nesting = 0;
100575 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
100576 smp_mb__before_atomic(); /* See above. */
100577- atomic_inc(&rdtp->dynticks);
100578+ atomic_inc_unchecked(&rdtp->dynticks);
100579 smp_mb__after_atomic(); /* Force delay to next write. */
100580- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
100581+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
100582 }
100583
100584 /**
100585@@ -900,7 +900,7 @@ void rcu_nmi_exit(void)
100586 */
100587 bool notrace __rcu_is_watching(void)
100588 {
100589- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
100590+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
100591 }
100592
100593 /**
100594@@ -983,7 +983,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
100595 static int dyntick_save_progress_counter(struct rcu_data *rdp,
100596 bool *isidle, unsigned long *maxj)
100597 {
100598- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
100599+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
100600 rcu_sysidle_check_cpu(rdp, isidle, maxj);
100601 if ((rdp->dynticks_snap & 0x1) == 0) {
100602 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
100603@@ -991,7 +991,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
100604 } else {
100605 if (ULONG_CMP_LT(ACCESS_ONCE(rdp->gpnum) + ULONG_MAX / 4,
100606 rdp->mynode->gpnum))
100607- ACCESS_ONCE(rdp->gpwrap) = true;
100608+ ACCESS_ONCE_RW(rdp->gpwrap) = true;
100609 return 0;
100610 }
100611 }
100612@@ -1009,7 +1009,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
100613 int *rcrmp;
100614 unsigned int snap;
100615
100616- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
100617+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
100618 snap = (unsigned int)rdp->dynticks_snap;
100619
100620 /*
100621@@ -1072,10 +1072,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
100622 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
100623 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
100624 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
100625- ACCESS_ONCE(rdp->cond_resched_completed) =
100626+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
100627 ACCESS_ONCE(rdp->mynode->completed);
100628 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
100629- ACCESS_ONCE(*rcrmp) =
100630+ ACCESS_ONCE_RW(*rcrmp) =
100631 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
100632 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
100633 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
100634@@ -1097,7 +1097,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
100635 rsp->gp_start = j;
100636 smp_wmb(); /* Record start time before stall time. */
100637 j1 = rcu_jiffies_till_stall_check();
100638- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
100639+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
100640 rsp->jiffies_resched = j + j1 / 2;
100641 rsp->n_force_qs_gpstart = ACCESS_ONCE(rsp->n_force_qs);
100642 }
100643@@ -1156,7 +1156,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
100644 raw_spin_unlock_irqrestore(&rnp->lock, flags);
100645 return;
100646 }
100647- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
100648+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
100649 raw_spin_unlock_irqrestore(&rnp->lock, flags);
100650
100651 /*
100652@@ -1240,7 +1240,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
100653
100654 raw_spin_lock_irqsave(&rnp->lock, flags);
100655 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
100656- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
100657+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
100658 3 * rcu_jiffies_till_stall_check() + 3;
100659 raw_spin_unlock_irqrestore(&rnp->lock, flags);
100660
100661@@ -1324,7 +1324,7 @@ void rcu_cpu_stall_reset(void)
100662 struct rcu_state *rsp;
100663
100664 for_each_rcu_flavor(rsp)
100665- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
100666+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
100667 }
100668
100669 /*
100670@@ -1671,7 +1671,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
100671 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
100672 rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
100673 zero_cpu_stall_ticks(rdp);
100674- ACCESS_ONCE(rdp->gpwrap) = false;
100675+ ACCESS_ONCE_RW(rdp->gpwrap) = false;
100676 }
100677 return ret;
100678 }
100679@@ -1706,7 +1706,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
100680 struct rcu_data *rdp;
100681 struct rcu_node *rnp = rcu_get_root(rsp);
100682
100683- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100684+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100685 rcu_bind_gp_kthread();
100686 raw_spin_lock_irq(&rnp->lock);
100687 smp_mb__after_unlock_lock();
100688@@ -1715,7 +1715,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
100689 raw_spin_unlock_irq(&rnp->lock);
100690 return 0;
100691 }
100692- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
100693+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
100694
100695 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
100696 /*
100697@@ -1756,9 +1756,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
100698 rdp = this_cpu_ptr(rsp->rda);
100699 rcu_preempt_check_blocked_tasks(rnp);
100700 rnp->qsmask = rnp->qsmaskinit;
100701- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
100702+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
100703 WARN_ON_ONCE(rnp->completed != rsp->completed);
100704- ACCESS_ONCE(rnp->completed) = rsp->completed;
100705+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
100706 if (rnp == rdp->mynode)
100707 (void)__note_gp_changes(rsp, rnp, rdp);
100708 rcu_preempt_boost_start_gp(rnp);
100709@@ -1767,7 +1767,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
100710 rnp->grphi, rnp->qsmask);
100711 raw_spin_unlock_irq(&rnp->lock);
100712 cond_resched_rcu_qs();
100713- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100714+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100715 }
100716
100717 mutex_unlock(&rsp->onoff_mutex);
100718@@ -1784,7 +1784,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
100719 unsigned long maxj;
100720 struct rcu_node *rnp = rcu_get_root(rsp);
100721
100722- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100723+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100724 rsp->n_force_qs++;
100725 if (fqs_state == RCU_SAVE_DYNTICK) {
100726 /* Collect dyntick-idle snapshots. */
100727@@ -1805,7 +1805,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
100728 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
100729 raw_spin_lock_irq(&rnp->lock);
100730 smp_mb__after_unlock_lock();
100731- ACCESS_ONCE(rsp->gp_flags) =
100732+ ACCESS_ONCE_RW(rsp->gp_flags) =
100733 ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
100734 raw_spin_unlock_irq(&rnp->lock);
100735 }
100736@@ -1823,7 +1823,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
100737 struct rcu_data *rdp;
100738 struct rcu_node *rnp = rcu_get_root(rsp);
100739
100740- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100741+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100742 raw_spin_lock_irq(&rnp->lock);
100743 smp_mb__after_unlock_lock();
100744 gp_duration = jiffies - rsp->gp_start;
100745@@ -1852,7 +1852,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
100746 rcu_for_each_node_breadth_first(rsp, rnp) {
100747 raw_spin_lock_irq(&rnp->lock);
100748 smp_mb__after_unlock_lock();
100749- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
100750+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
100751 rdp = this_cpu_ptr(rsp->rda);
100752 if (rnp == rdp->mynode)
100753 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
100754@@ -1860,7 +1860,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
100755 nocb += rcu_future_gp_cleanup(rsp, rnp);
100756 raw_spin_unlock_irq(&rnp->lock);
100757 cond_resched_rcu_qs();
100758- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100759+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100760 }
100761 rnp = rcu_get_root(rsp);
100762 raw_spin_lock_irq(&rnp->lock);
100763@@ -1868,14 +1868,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
100764 rcu_nocb_gp_set(rnp, nocb);
100765
100766 /* Declare grace period done. */
100767- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
100768+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
100769 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
100770 rsp->fqs_state = RCU_GP_IDLE;
100771 rdp = this_cpu_ptr(rsp->rda);
100772 /* Advance CBs to reduce false positives below. */
100773 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
100774 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
100775- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
100776+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
100777 trace_rcu_grace_period(rsp->name,
100778 ACCESS_ONCE(rsp->gpnum),
100779 TPS("newreq"));
100780@@ -1910,7 +1910,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
100781 if (rcu_gp_init(rsp))
100782 break;
100783 cond_resched_rcu_qs();
100784- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100785+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100786 WARN_ON(signal_pending(current));
100787 trace_rcu_grace_period(rsp->name,
100788 ACCESS_ONCE(rsp->gpnum),
100789@@ -1954,11 +1954,11 @@ static int __noreturn rcu_gp_kthread(void *arg)
100790 ACCESS_ONCE(rsp->gpnum),
100791 TPS("fqsend"));
100792 cond_resched_rcu_qs();
100793- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100794+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100795 } else {
100796 /* Deal with stray signal. */
100797 cond_resched_rcu_qs();
100798- ACCESS_ONCE(rsp->gp_activity) = jiffies;
100799+ ACCESS_ONCE_RW(rsp->gp_activity) = jiffies;
100800 WARN_ON(signal_pending(current));
100801 trace_rcu_grace_period(rsp->name,
100802 ACCESS_ONCE(rsp->gpnum),
100803@@ -2003,7 +2003,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
100804 */
100805 return false;
100806 }
100807- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
100808+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
100809 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
100810 TPS("newreq"));
100811
100812@@ -2228,7 +2228,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
100813 rsp->qlen += rdp->qlen;
100814 rdp->n_cbs_orphaned += rdp->qlen;
100815 rdp->qlen_lazy = 0;
100816- ACCESS_ONCE(rdp->qlen) = 0;
100817+ ACCESS_ONCE_RW(rdp->qlen) = 0;
100818 }
100819
100820 /*
100821@@ -2490,7 +2490,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
100822 }
100823 smp_mb(); /* List handling before counting for rcu_barrier(). */
100824 rdp->qlen_lazy -= count_lazy;
100825- ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
100826+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen - count;
100827 rdp->n_cbs_invoked += count;
100828
100829 /* Reinstate batch limit if we have worked down the excess. */
100830@@ -2647,7 +2647,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
100831 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
100832 return; /* Someone beat us to it. */
100833 }
100834- ACCESS_ONCE(rsp->gp_flags) =
100835+ ACCESS_ONCE_RW(rsp->gp_flags) =
100836 ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
100837 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
100838 rcu_gp_kthread_wake(rsp);
100839@@ -2693,7 +2693,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
100840 /*
100841 * Do RCU core processing for the current CPU.
100842 */
100843-static void rcu_process_callbacks(struct softirq_action *unused)
100844+static void rcu_process_callbacks(void)
100845 {
100846 struct rcu_state *rsp;
100847
100848@@ -2805,7 +2805,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
100849 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
100850 if (debug_rcu_head_queue(head)) {
100851 /* Probable double call_rcu(), so leak the callback. */
100852- ACCESS_ONCE(head->func) = rcu_leak_callback;
100853+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
100854 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
100855 return;
100856 }
100857@@ -2833,7 +2833,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
100858 local_irq_restore(flags);
100859 return;
100860 }
100861- ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
100862+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen + 1;
100863 if (lazy)
100864 rdp->qlen_lazy++;
100865 else
100866@@ -3106,11 +3106,11 @@ void synchronize_sched_expedited(void)
100867 * counter wrap on a 32-bit system. Quite a few more CPUs would of
100868 * course be required on a 64-bit system.
100869 */
100870- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
100871+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
100872 (ulong)atomic_long_read(&rsp->expedited_done) +
100873 ULONG_MAX / 8)) {
100874 synchronize_sched();
100875- atomic_long_inc(&rsp->expedited_wrap);
100876+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
100877 return;
100878 }
100879
100880@@ -3118,12 +3118,12 @@ void synchronize_sched_expedited(void)
100881 * Take a ticket. Note that atomic_inc_return() implies a
100882 * full memory barrier.
100883 */
100884- snap = atomic_long_inc_return(&rsp->expedited_start);
100885+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
100886 firstsnap = snap;
100887 if (!try_get_online_cpus()) {
100888 /* CPU hotplug operation in flight, fall back to normal GP. */
100889 wait_rcu_gp(call_rcu_sched);
100890- atomic_long_inc(&rsp->expedited_normal);
100891+ atomic_long_inc_unchecked(&rsp->expedited_normal);
100892 return;
100893 }
100894 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
100895@@ -3136,7 +3136,7 @@ void synchronize_sched_expedited(void)
100896 for_each_cpu(cpu, cm) {
100897 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
100898
100899- if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
100900+ if (!(atomic_add_return_unchecked(0, &rdtp->dynticks) & 0x1))
100901 cpumask_clear_cpu(cpu, cm);
100902 }
100903 if (cpumask_weight(cm) == 0)
100904@@ -3151,14 +3151,14 @@ void synchronize_sched_expedited(void)
100905 synchronize_sched_expedited_cpu_stop,
100906 NULL) == -EAGAIN) {
100907 put_online_cpus();
100908- atomic_long_inc(&rsp->expedited_tryfail);
100909+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
100910
100911 /* Check to see if someone else did our work for us. */
100912 s = atomic_long_read(&rsp->expedited_done);
100913 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
100914 /* ensure test happens before caller kfree */
100915 smp_mb__before_atomic(); /* ^^^ */
100916- atomic_long_inc(&rsp->expedited_workdone1);
100917+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
100918 free_cpumask_var(cm);
100919 return;
100920 }
100921@@ -3168,7 +3168,7 @@ void synchronize_sched_expedited(void)
100922 udelay(trycount * num_online_cpus());
100923 } else {
100924 wait_rcu_gp(call_rcu_sched);
100925- atomic_long_inc(&rsp->expedited_normal);
100926+ atomic_long_inc_unchecked(&rsp->expedited_normal);
100927 free_cpumask_var(cm);
100928 return;
100929 }
100930@@ -3178,7 +3178,7 @@ void synchronize_sched_expedited(void)
100931 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
100932 /* ensure test happens before caller kfree */
100933 smp_mb__before_atomic(); /* ^^^ */
100934- atomic_long_inc(&rsp->expedited_workdone2);
100935+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
100936 free_cpumask_var(cm);
100937 return;
100938 }
100939@@ -3193,14 +3193,14 @@ void synchronize_sched_expedited(void)
100940 if (!try_get_online_cpus()) {
100941 /* CPU hotplug operation in flight, use normal GP. */
100942 wait_rcu_gp(call_rcu_sched);
100943- atomic_long_inc(&rsp->expedited_normal);
100944+ atomic_long_inc_unchecked(&rsp->expedited_normal);
100945 free_cpumask_var(cm);
100946 return;
100947 }
100948- snap = atomic_long_read(&rsp->expedited_start);
100949+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
100950 smp_mb(); /* ensure read is before try_stop_cpus(). */
100951 }
100952- atomic_long_inc(&rsp->expedited_stoppedcpus);
100953+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
100954
100955 all_cpus_idle:
100956 free_cpumask_var(cm);
100957@@ -3212,16 +3212,16 @@ all_cpus_idle:
100958 * than we did already did their update.
100959 */
100960 do {
100961- atomic_long_inc(&rsp->expedited_done_tries);
100962+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
100963 s = atomic_long_read(&rsp->expedited_done);
100964 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
100965 /* ensure test happens before caller kfree */
100966 smp_mb__before_atomic(); /* ^^^ */
100967- atomic_long_inc(&rsp->expedited_done_lost);
100968+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
100969 break;
100970 }
100971 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
100972- atomic_long_inc(&rsp->expedited_done_exit);
100973+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
100974
100975 put_online_cpus();
100976 }
100977@@ -3431,7 +3431,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
100978 * ACCESS_ONCE() to prevent the compiler from speculating
100979 * the increment to precede the early-exit check.
100980 */
100981- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
100982+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
100983 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
100984 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
100985 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
100986@@ -3487,7 +3487,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
100987
100988 /* Increment ->n_barrier_done to prevent duplicate work. */
100989 smp_mb(); /* Keep increment after above mechanism. */
100990- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
100991+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
100992 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
100993 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
100994 smp_mb(); /* Keep increment before caller's subsequent code. */
100995@@ -3532,7 +3532,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
100996 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
100997 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
100998 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
100999- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
101000+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
101001 rdp->cpu = cpu;
101002 rdp->rsp = rsp;
101003 rcu_boot_init_nocb_percpu_data(rdp);
101004@@ -3565,8 +3565,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
101005 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
101006 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
101007 rcu_sysidle_init_percpu_data(rdp->dynticks);
101008- atomic_set(&rdp->dynticks->dynticks,
101009- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
101010+ atomic_set_unchecked(&rdp->dynticks->dynticks,
101011+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
101012 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
101013
101014 /* Add CPU to rcu_node bitmasks. */
101015diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
101016index 119de39..f07d31a 100644
101017--- a/kernel/rcu/tree.h
101018+++ b/kernel/rcu/tree.h
101019@@ -86,11 +86,11 @@ struct rcu_dynticks {
101020 long long dynticks_nesting; /* Track irq/process nesting level. */
101021 /* Process level is worth LLONG_MAX/2. */
101022 int dynticks_nmi_nesting; /* Track NMI nesting level. */
101023- atomic_t dynticks; /* Even value for idle, else odd. */
101024+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
101025 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
101026 long long dynticks_idle_nesting;
101027 /* irq/process nesting level from idle. */
101028- atomic_t dynticks_idle; /* Even value for idle, else odd. */
101029+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
101030 /* "Idle" excludes userspace execution. */
101031 unsigned long dynticks_idle_jiffies;
101032 /* End of last non-NMI non-idle period. */
101033@@ -457,17 +457,17 @@ struct rcu_state {
101034 /* _rcu_barrier(). */
101035 /* End of fields guarded by barrier_mutex. */
101036
101037- atomic_long_t expedited_start; /* Starting ticket. */
101038- atomic_long_t expedited_done; /* Done ticket. */
101039- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
101040- atomic_long_t expedited_tryfail; /* # acquisition failures. */
101041- atomic_long_t expedited_workdone1; /* # done by others #1. */
101042- atomic_long_t expedited_workdone2; /* # done by others #2. */
101043- atomic_long_t expedited_normal; /* # fallbacks to normal. */
101044- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
101045- atomic_long_t expedited_done_tries; /* # tries to update _done. */
101046- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
101047- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
101048+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
101049+ atomic_long_t expedited_done; /* Done ticket. */
101050+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
101051+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
101052+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
101053+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
101054+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
101055+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
101056+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
101057+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
101058+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
101059
101060 unsigned long jiffies_force_qs; /* Time at which to invoke */
101061 /* force_quiescent_state(). */
101062diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
101063index 0a571e9..fbfd611 100644
101064--- a/kernel/rcu/tree_plugin.h
101065+++ b/kernel/rcu/tree_plugin.h
101066@@ -619,7 +619,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
101067 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
101068 {
101069 return !rcu_preempted_readers_exp(rnp) &&
101070- ACCESS_ONCE(rnp->expmask) == 0;
101071+ ACCESS_ONCE_RW(rnp->expmask) == 0;
101072 }
101073
101074 /*
101075@@ -780,7 +780,7 @@ void synchronize_rcu_expedited(void)
101076
101077 /* Clean up and exit. */
101078 smp_mb(); /* ensure expedited GP seen before counter increment. */
101079- ACCESS_ONCE(sync_rcu_preempt_exp_count) =
101080+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count) =
101081 sync_rcu_preempt_exp_count + 1;
101082 unlock_mb_ret:
101083 mutex_unlock(&sync_rcu_preempt_exp_mutex);
101084@@ -1290,7 +1290,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
101085 free_cpumask_var(cm);
101086 }
101087
101088-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
101089+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
101090 .store = &rcu_cpu_kthread_task,
101091 .thread_should_run = rcu_cpu_kthread_should_run,
101092 .thread_fn = rcu_cpu_kthread,
101093@@ -1761,7 +1761,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
101094 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
101095 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
101096 cpu, ticks_value, ticks_title,
101097- atomic_read(&rdtp->dynticks) & 0xfff,
101098+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
101099 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
101100 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
101101 ACCESS_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
101102@@ -1906,7 +1906,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
101103 return;
101104 if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
101105 /* Prior smp_mb__after_atomic() orders against prior enqueue. */
101106- ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
101107+ ACCESS_ONCE_RW(rdp_leader->nocb_leader_sleep) = false;
101108 wake_up(&rdp_leader->nocb_wq);
101109 }
101110 }
101111@@ -1978,7 +1978,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
101112 atomic_long_add(rhcount, &rdp->nocb_q_count);
101113 /* rcu_barrier() relies on ->nocb_q_count add before xchg. */
101114 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
101115- ACCESS_ONCE(*old_rhpp) = rhp;
101116+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
101117 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
101118 smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
101119
101120@@ -2167,7 +2167,7 @@ wait_again:
101121 continue; /* No CBs here, try next follower. */
101122
101123 /* Move callbacks to wait-for-GP list, which is empty. */
101124- ACCESS_ONCE(rdp->nocb_head) = NULL;
101125+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
101126 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
101127 gotcbs = true;
101128 }
101129@@ -2288,7 +2288,7 @@ static int rcu_nocb_kthread(void *arg)
101130 list = ACCESS_ONCE(rdp->nocb_follower_head);
101131 BUG_ON(!list);
101132 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
101133- ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
101134+ ACCESS_ONCE_RW(rdp->nocb_follower_head) = NULL;
101135 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
101136
101137 /* Each pass through the following loop invokes a callback. */
101138@@ -2338,7 +2338,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
101139 if (!rcu_nocb_need_deferred_wakeup(rdp))
101140 return;
101141 ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
101142- ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
101143+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
101144 wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
101145 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
101146 }
101147@@ -2461,7 +2461,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
101148 t = kthread_run(rcu_nocb_kthread, rdp_spawn,
101149 "rcuo%c/%d", rsp->abbr, cpu);
101150 BUG_ON(IS_ERR(t));
101151- ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
101152+ ACCESS_ONCE_RW(rdp_spawn->nocb_kthread) = t;
101153 }
101154
101155 /*
101156@@ -2666,11 +2666,11 @@ static void rcu_sysidle_enter(int irq)
101157
101158 /* Record start of fully idle period. */
101159 j = jiffies;
101160- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
101161+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
101162 smp_mb__before_atomic();
101163- atomic_inc(&rdtp->dynticks_idle);
101164+ atomic_inc_unchecked(&rdtp->dynticks_idle);
101165 smp_mb__after_atomic();
101166- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
101167+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
101168 }
101169
101170 /*
101171@@ -2741,9 +2741,9 @@ static void rcu_sysidle_exit(int irq)
101172
101173 /* Record end of idle period. */
101174 smp_mb__before_atomic();
101175- atomic_inc(&rdtp->dynticks_idle);
101176+ atomic_inc_unchecked(&rdtp->dynticks_idle);
101177 smp_mb__after_atomic();
101178- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
101179+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
101180
101181 /*
101182 * If we are the timekeeping CPU, we are permitted to be non-idle
101183@@ -2788,7 +2788,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
101184 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
101185
101186 /* Pick up current idle and NMI-nesting counter and check. */
101187- cur = atomic_read(&rdtp->dynticks_idle);
101188+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
101189 if (cur & 0x1) {
101190 *isidle = false; /* We are not idle! */
101191 return;
101192@@ -2837,7 +2837,7 @@ static void rcu_sysidle(unsigned long j)
101193 case RCU_SYSIDLE_NOT:
101194
101195 /* First time all are idle, so note a short idle period. */
101196- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
101197+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
101198 break;
101199
101200 case RCU_SYSIDLE_SHORT:
101201@@ -2875,7 +2875,7 @@ static void rcu_sysidle_cancel(void)
101202 {
101203 smp_mb();
101204 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
101205- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
101206+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
101207 }
101208
101209 /*
101210@@ -2927,7 +2927,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
101211 smp_mb(); /* grace period precedes setting inuse. */
101212
101213 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
101214- ACCESS_ONCE(rshp->inuse) = 0;
101215+ ACCESS_ONCE_RW(rshp->inuse) = 0;
101216 }
101217
101218 /*
101219@@ -3080,7 +3080,7 @@ static void rcu_bind_gp_kthread(void)
101220 static void rcu_dynticks_task_enter(void)
101221 {
101222 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
101223- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
101224+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = smp_processor_id();
101225 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
101226 }
101227
101228@@ -3088,6 +3088,6 @@ static void rcu_dynticks_task_enter(void)
101229 static void rcu_dynticks_task_exit(void)
101230 {
101231 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
101232- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
101233+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = -1;
101234 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
101235 }
101236diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
101237index fbb6240..f6c5097 100644
101238--- a/kernel/rcu/tree_trace.c
101239+++ b/kernel/rcu/tree_trace.c
101240@@ -125,7 +125,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
101241 rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
101242 rdp->qs_pending);
101243 seq_printf(m, " dt=%d/%llx/%d df=%lu",
101244- atomic_read(&rdp->dynticks->dynticks),
101245+ atomic_read_unchecked(&rdp->dynticks->dynticks),
101246 rdp->dynticks->dynticks_nesting,
101247 rdp->dynticks->dynticks_nmi_nesting,
101248 rdp->dynticks_fqs);
101249@@ -186,17 +186,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
101250 struct rcu_state *rsp = (struct rcu_state *)m->private;
101251
101252 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
101253- atomic_long_read(&rsp->expedited_start),
101254+ atomic_long_read_unchecked(&rsp->expedited_start),
101255 atomic_long_read(&rsp->expedited_done),
101256- atomic_long_read(&rsp->expedited_wrap),
101257- atomic_long_read(&rsp->expedited_tryfail),
101258- atomic_long_read(&rsp->expedited_workdone1),
101259- atomic_long_read(&rsp->expedited_workdone2),
101260- atomic_long_read(&rsp->expedited_normal),
101261- atomic_long_read(&rsp->expedited_stoppedcpus),
101262- atomic_long_read(&rsp->expedited_done_tries),
101263- atomic_long_read(&rsp->expedited_done_lost),
101264- atomic_long_read(&rsp->expedited_done_exit));
101265+ atomic_long_read_unchecked(&rsp->expedited_wrap),
101266+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
101267+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
101268+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
101269+ atomic_long_read_unchecked(&rsp->expedited_normal),
101270+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
101271+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
101272+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
101273+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
101274 return 0;
101275 }
101276
101277diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
101278index e0d31a3..f4dafe3 100644
101279--- a/kernel/rcu/update.c
101280+++ b/kernel/rcu/update.c
101281@@ -342,10 +342,10 @@ int rcu_jiffies_till_stall_check(void)
101282 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
101283 */
101284 if (till_stall_check < 3) {
101285- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
101286+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
101287 till_stall_check = 3;
101288 } else if (till_stall_check > 300) {
101289- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
101290+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
101291 till_stall_check = 300;
101292 }
101293 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
101294@@ -501,7 +501,7 @@ static void check_holdout_task(struct task_struct *t,
101295 !ACCESS_ONCE(t->on_rq) ||
101296 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
101297 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
101298- ACCESS_ONCE(t->rcu_tasks_holdout) = false;
101299+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = false;
101300 list_del_init(&t->rcu_tasks_holdout_list);
101301 put_task_struct(t);
101302 return;
101303@@ -589,7 +589,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
101304 !is_idle_task(t)) {
101305 get_task_struct(t);
101306 t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
101307- ACCESS_ONCE(t->rcu_tasks_holdout) = true;
101308+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = true;
101309 list_add(&t->rcu_tasks_holdout_list,
101310 &rcu_tasks_holdouts);
101311 }
101312@@ -686,7 +686,7 @@ static void rcu_spawn_tasks_kthread(void)
101313 t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
101314 BUG_ON(IS_ERR(t));
101315 smp_mb(); /* Ensure others see full kthread. */
101316- ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
101317+ ACCESS_ONCE_RW(rcu_tasks_kthread_ptr) = t;
101318 mutex_unlock(&rcu_tasks_kthread_mutex);
101319 }
101320
101321diff --git a/kernel/resource.c b/kernel/resource.c
101322index 19f2357..ebe7f35 100644
101323--- a/kernel/resource.c
101324+++ b/kernel/resource.c
101325@@ -162,8 +162,18 @@ static const struct file_operations proc_iomem_operations = {
101326
101327 static int __init ioresources_init(void)
101328 {
101329+#ifdef CONFIG_GRKERNSEC_PROC_ADD
101330+#ifdef CONFIG_GRKERNSEC_PROC_USER
101331+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
101332+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
101333+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
101334+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
101335+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
101336+#endif
101337+#else
101338 proc_create("ioports", 0, NULL, &proc_ioports_operations);
101339 proc_create("iomem", 0, NULL, &proc_iomem_operations);
101340+#endif
101341 return 0;
101342 }
101343 __initcall(ioresources_init);
101344diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
101345index eae160d..c9aa22e 100644
101346--- a/kernel/sched/auto_group.c
101347+++ b/kernel/sched/auto_group.c
101348@@ -11,7 +11,7 @@
101349
101350 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
101351 static struct autogroup autogroup_default;
101352-static atomic_t autogroup_seq_nr;
101353+static atomic_unchecked_t autogroup_seq_nr;
101354
101355 void __init autogroup_init(struct task_struct *init_task)
101356 {
101357@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
101358
101359 kref_init(&ag->kref);
101360 init_rwsem(&ag->lock);
101361- ag->id = atomic_inc_return(&autogroup_seq_nr);
101362+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
101363 ag->tg = tg;
101364 #ifdef CONFIG_RT_GROUP_SCHED
101365 /*
101366diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
101367index 8d0f35d..c16360d 100644
101368--- a/kernel/sched/completion.c
101369+++ b/kernel/sched/completion.c
101370@@ -205,7 +205,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
101371 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
101372 * or number of jiffies left till timeout) if completed.
101373 */
101374-long __sched
101375+long __sched __intentional_overflow(-1)
101376 wait_for_completion_interruptible_timeout(struct completion *x,
101377 unsigned long timeout)
101378 {
101379@@ -222,7 +222,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
101380 *
101381 * Return: -ERESTARTSYS if interrupted, 0 if completed.
101382 */
101383-int __sched wait_for_completion_killable(struct completion *x)
101384+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
101385 {
101386 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
101387 if (t == -ERESTARTSYS)
101388@@ -243,7 +243,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
101389 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
101390 * or number of jiffies left till timeout) if completed.
101391 */
101392-long __sched
101393+long __sched __intentional_overflow(-1)
101394 wait_for_completion_killable_timeout(struct completion *x,
101395 unsigned long timeout)
101396 {
101397diff --git a/kernel/sched/core.c b/kernel/sched/core.c
101398index f4da2cb..e44587b 100644
101399--- a/kernel/sched/core.c
101400+++ b/kernel/sched/core.c
101401@@ -1862,7 +1862,7 @@ void set_numabalancing_state(bool enabled)
101402 int sysctl_numa_balancing(struct ctl_table *table, int write,
101403 void __user *buffer, size_t *lenp, loff_t *ppos)
101404 {
101405- struct ctl_table t;
101406+ ctl_table_no_const t;
101407 int err;
101408 int state = numabalancing_enabled;
101409
101410@@ -2312,8 +2312,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
101411 next->active_mm = oldmm;
101412 atomic_inc(&oldmm->mm_count);
101413 enter_lazy_tlb(oldmm, next);
101414- } else
101415+ } else {
101416 switch_mm(oldmm, mm, next);
101417+ populate_stack();
101418+ }
101419
101420 if (!prev->mm) {
101421 prev->active_mm = NULL;
101422@@ -3124,6 +3126,8 @@ int can_nice(const struct task_struct *p, const int nice)
101423 /* convert nice value [19,-20] to rlimit style value [1,40] */
101424 int nice_rlim = nice_to_rlimit(nice);
101425
101426+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
101427+
101428 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
101429 capable(CAP_SYS_NICE));
101430 }
101431@@ -3150,7 +3154,8 @@ SYSCALL_DEFINE1(nice, int, increment)
101432 nice = task_nice(current) + increment;
101433
101434 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
101435- if (increment < 0 && !can_nice(current, nice))
101436+ if (increment < 0 && (!can_nice(current, nice) ||
101437+ gr_handle_chroot_nice()))
101438 return -EPERM;
101439
101440 retval = security_task_setnice(current, nice);
101441@@ -3462,6 +3467,7 @@ recheck:
101442 if (policy != p->policy && !rlim_rtprio)
101443 return -EPERM;
101444
101445+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
101446 /* can't increase priority */
101447 if (attr->sched_priority > p->rt_priority &&
101448 attr->sched_priority > rlim_rtprio)
101449@@ -4945,6 +4951,7 @@ void idle_task_exit(void)
101450
101451 if (mm != &init_mm) {
101452 switch_mm(mm, &init_mm, current);
101453+ populate_stack();
101454 finish_arch_post_lock_switch();
101455 }
101456 mmdrop(mm);
101457@@ -5040,7 +5047,7 @@ static void migrate_tasks(unsigned int dead_cpu)
101458
101459 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
101460
101461-static struct ctl_table sd_ctl_dir[] = {
101462+static ctl_table_no_const sd_ctl_dir[] __read_only = {
101463 {
101464 .procname = "sched_domain",
101465 .mode = 0555,
101466@@ -5057,17 +5064,17 @@ static struct ctl_table sd_ctl_root[] = {
101467 {}
101468 };
101469
101470-static struct ctl_table *sd_alloc_ctl_entry(int n)
101471+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
101472 {
101473- struct ctl_table *entry =
101474+ ctl_table_no_const *entry =
101475 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
101476
101477 return entry;
101478 }
101479
101480-static void sd_free_ctl_entry(struct ctl_table **tablep)
101481+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
101482 {
101483- struct ctl_table *entry;
101484+ ctl_table_no_const *entry;
101485
101486 /*
101487 * In the intermediate directories, both the child directory and
101488@@ -5075,22 +5082,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
101489 * will always be set. In the lowest directory the names are
101490 * static strings and all have proc handlers.
101491 */
101492- for (entry = *tablep; entry->mode; entry++) {
101493- if (entry->child)
101494- sd_free_ctl_entry(&entry->child);
101495+ for (entry = tablep; entry->mode; entry++) {
101496+ if (entry->child) {
101497+ sd_free_ctl_entry(entry->child);
101498+ pax_open_kernel();
101499+ entry->child = NULL;
101500+ pax_close_kernel();
101501+ }
101502 if (entry->proc_handler == NULL)
101503 kfree(entry->procname);
101504 }
101505
101506- kfree(*tablep);
101507- *tablep = NULL;
101508+ kfree(tablep);
101509 }
101510
101511 static int min_load_idx = 0;
101512 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
101513
101514 static void
101515-set_table_entry(struct ctl_table *entry,
101516+set_table_entry(ctl_table_no_const *entry,
101517 const char *procname, void *data, int maxlen,
101518 umode_t mode, proc_handler *proc_handler,
101519 bool load_idx)
101520@@ -5110,7 +5120,7 @@ set_table_entry(struct ctl_table *entry,
101521 static struct ctl_table *
101522 sd_alloc_ctl_domain_table(struct sched_domain *sd)
101523 {
101524- struct ctl_table *table = sd_alloc_ctl_entry(14);
101525+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
101526
101527 if (table == NULL)
101528 return NULL;
101529@@ -5148,9 +5158,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
101530 return table;
101531 }
101532
101533-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
101534+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
101535 {
101536- struct ctl_table *entry, *table;
101537+ ctl_table_no_const *entry, *table;
101538 struct sched_domain *sd;
101539 int domain_num = 0, i;
101540 char buf[32];
101541@@ -5177,11 +5187,13 @@ static struct ctl_table_header *sd_sysctl_header;
101542 static void register_sched_domain_sysctl(void)
101543 {
101544 int i, cpu_num = num_possible_cpus();
101545- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
101546+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
101547 char buf[32];
101548
101549 WARN_ON(sd_ctl_dir[0].child);
101550+ pax_open_kernel();
101551 sd_ctl_dir[0].child = entry;
101552+ pax_close_kernel();
101553
101554 if (entry == NULL)
101555 return;
101556@@ -5204,8 +5216,12 @@ static void unregister_sched_domain_sysctl(void)
101557 if (sd_sysctl_header)
101558 unregister_sysctl_table(sd_sysctl_header);
101559 sd_sysctl_header = NULL;
101560- if (sd_ctl_dir[0].child)
101561- sd_free_ctl_entry(&sd_ctl_dir[0].child);
101562+ if (sd_ctl_dir[0].child) {
101563+ sd_free_ctl_entry(sd_ctl_dir[0].child);
101564+ pax_open_kernel();
101565+ sd_ctl_dir[0].child = NULL;
101566+ pax_close_kernel();
101567+ }
101568 }
101569 #else
101570 static void register_sched_domain_sysctl(void)
101571diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
101572index 486d00c..62f3f6e 100644
101573--- a/kernel/sched/fair.c
101574+++ b/kernel/sched/fair.c
101575@@ -2092,7 +2092,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
101576
101577 static void reset_ptenuma_scan(struct task_struct *p)
101578 {
101579- ACCESS_ONCE(p->mm->numa_scan_seq)++;
101580+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
101581 p->mm->numa_scan_offset = 0;
101582 }
101583
101584@@ -7656,7 +7656,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
101585 * run_rebalance_domains is triggered when needed from the scheduler tick.
101586 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
101587 */
101588-static void run_rebalance_domains(struct softirq_action *h)
101589+static __latent_entropy void run_rebalance_domains(void)
101590 {
101591 struct rq *this_rq = this_rq();
101592 enum cpu_idle_type idle = this_rq->idle_balance ?
101593diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
101594index dc0f435..ae2e085 100644
101595--- a/kernel/sched/sched.h
101596+++ b/kernel/sched/sched.h
101597@@ -1200,7 +1200,7 @@ struct sched_class {
101598 #ifdef CONFIG_FAIR_GROUP_SCHED
101599 void (*task_move_group) (struct task_struct *p, int on_rq);
101600 #endif
101601-};
101602+} __do_const;
101603
101604 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
101605 {
101606diff --git a/kernel/signal.c b/kernel/signal.c
101607index a390499..ebe9a21 100644
101608--- a/kernel/signal.c
101609+++ b/kernel/signal.c
101610@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
101611
101612 int print_fatal_signals __read_mostly;
101613
101614-static void __user *sig_handler(struct task_struct *t, int sig)
101615+static __sighandler_t sig_handler(struct task_struct *t, int sig)
101616 {
101617 return t->sighand->action[sig - 1].sa.sa_handler;
101618 }
101619
101620-static int sig_handler_ignored(void __user *handler, int sig)
101621+static int sig_handler_ignored(__sighandler_t handler, int sig)
101622 {
101623 /* Is it explicitly or implicitly ignored? */
101624 return handler == SIG_IGN ||
101625@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
101626
101627 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
101628 {
101629- void __user *handler;
101630+ __sighandler_t handler;
101631
101632 handler = sig_handler(t, sig);
101633
101634@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
101635 atomic_inc(&user->sigpending);
101636 rcu_read_unlock();
101637
101638+ if (!override_rlimit)
101639+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
101640+
101641 if (override_rlimit ||
101642 atomic_read(&user->sigpending) <=
101643 task_rlimit(t, RLIMIT_SIGPENDING)) {
101644@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
101645
101646 int unhandled_signal(struct task_struct *tsk, int sig)
101647 {
101648- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
101649+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
101650 if (is_global_init(tsk))
101651 return 1;
101652 if (handler != SIG_IGN && handler != SIG_DFL)
101653@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
101654 }
101655 }
101656
101657+ /* allow glibc communication via tgkill to other threads in our
101658+ thread group */
101659+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
101660+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
101661+ && gr_handle_signal(t, sig))
101662+ return -EPERM;
101663+
101664 return security_task_kill(t, info, sig, 0);
101665 }
101666
101667@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
101668 return send_signal(sig, info, p, 1);
101669 }
101670
101671-static int
101672+int
101673 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
101674 {
101675 return send_signal(sig, info, t, 0);
101676@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
101677 unsigned long int flags;
101678 int ret, blocked, ignored;
101679 struct k_sigaction *action;
101680+ int is_unhandled = 0;
101681
101682 spin_lock_irqsave(&t->sighand->siglock, flags);
101683 action = &t->sighand->action[sig-1];
101684@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
101685 }
101686 if (action->sa.sa_handler == SIG_DFL)
101687 t->signal->flags &= ~SIGNAL_UNKILLABLE;
101688+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
101689+ is_unhandled = 1;
101690 ret = specific_send_sig_info(sig, info, t);
101691 spin_unlock_irqrestore(&t->sighand->siglock, flags);
101692
101693+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
101694+ normal operation */
101695+ if (is_unhandled) {
101696+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
101697+ gr_handle_crash(t, sig);
101698+ }
101699+
101700 return ret;
101701 }
101702
101703@@ -1310,8 +1330,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
101704 ret = check_kill_permission(sig, info, p);
101705 rcu_read_unlock();
101706
101707- if (!ret && sig)
101708+ if (!ret && sig) {
101709 ret = do_send_sig_info(sig, info, p, true);
101710+ if (!ret)
101711+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
101712+ }
101713
101714 return ret;
101715 }
101716@@ -2915,7 +2938,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
101717 int error = -ESRCH;
101718
101719 rcu_read_lock();
101720- p = find_task_by_vpid(pid);
101721+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
101722+ /* allow glibc communication via tgkill to other threads in our
101723+ thread group */
101724+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
101725+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
101726+ p = find_task_by_vpid_unrestricted(pid);
101727+ else
101728+#endif
101729+ p = find_task_by_vpid(pid);
101730 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
101731 error = check_kill_permission(sig, info, p);
101732 /*
101733@@ -3248,8 +3279,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
101734 }
101735 seg = get_fs();
101736 set_fs(KERNEL_DS);
101737- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
101738- (stack_t __force __user *) &uoss,
101739+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
101740+ (stack_t __force_user *) &uoss,
101741 compat_user_stack_pointer());
101742 set_fs(seg);
101743 if (ret >= 0 && uoss_ptr) {
101744diff --git a/kernel/smpboot.c b/kernel/smpboot.c
101745index 40190f2..8861d40 100644
101746--- a/kernel/smpboot.c
101747+++ b/kernel/smpboot.c
101748@@ -290,7 +290,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
101749 }
101750 smpboot_unpark_thread(plug_thread, cpu);
101751 }
101752- list_add(&plug_thread->list, &hotplug_threads);
101753+ pax_list_add(&plug_thread->list, &hotplug_threads);
101754 out:
101755 mutex_unlock(&smpboot_threads_lock);
101756 put_online_cpus();
101757@@ -308,7 +308,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
101758 {
101759 get_online_cpus();
101760 mutex_lock(&smpboot_threads_lock);
101761- list_del(&plug_thread->list);
101762+ pax_list_del(&plug_thread->list);
101763 smpboot_destroy_threads(plug_thread);
101764 mutex_unlock(&smpboot_threads_lock);
101765 put_online_cpus();
101766diff --git a/kernel/softirq.c b/kernel/softirq.c
101767index 479e443..66d845e1 100644
101768--- a/kernel/softirq.c
101769+++ b/kernel/softirq.c
101770@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
101771 EXPORT_SYMBOL(irq_stat);
101772 #endif
101773
101774-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
101775+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
101776
101777 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
101778
101779@@ -270,7 +270,7 @@ restart:
101780 kstat_incr_softirqs_this_cpu(vec_nr);
101781
101782 trace_softirq_entry(vec_nr);
101783- h->action(h);
101784+ h->action();
101785 trace_softirq_exit(vec_nr);
101786 if (unlikely(prev_count != preempt_count())) {
101787 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
101788@@ -430,7 +430,7 @@ void __raise_softirq_irqoff(unsigned int nr)
101789 or_softirq_pending(1UL << nr);
101790 }
101791
101792-void open_softirq(int nr, void (*action)(struct softirq_action *))
101793+void __init open_softirq(int nr, void (*action)(void))
101794 {
101795 softirq_vec[nr].action = action;
101796 }
101797@@ -482,7 +482,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
101798 }
101799 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
101800
101801-static void tasklet_action(struct softirq_action *a)
101802+static void tasklet_action(void)
101803 {
101804 struct tasklet_struct *list;
101805
101806@@ -518,7 +518,7 @@ static void tasklet_action(struct softirq_action *a)
101807 }
101808 }
101809
101810-static void tasklet_hi_action(struct softirq_action *a)
101811+static __latent_entropy void tasklet_hi_action(void)
101812 {
101813 struct tasklet_struct *list;
101814
101815@@ -744,7 +744,7 @@ static struct notifier_block cpu_nfb = {
101816 .notifier_call = cpu_callback
101817 };
101818
101819-static struct smp_hotplug_thread softirq_threads = {
101820+static struct smp_hotplug_thread softirq_threads __read_only = {
101821 .store = &ksoftirqd,
101822 .thread_should_run = ksoftirqd_should_run,
101823 .thread_fn = run_ksoftirqd,
101824diff --git a/kernel/sys.c b/kernel/sys.c
101825index a03d9cd..55dbe9c 100644
101826--- a/kernel/sys.c
101827+++ b/kernel/sys.c
101828@@ -160,6 +160,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
101829 error = -EACCES;
101830 goto out;
101831 }
101832+
101833+ if (gr_handle_chroot_setpriority(p, niceval)) {
101834+ error = -EACCES;
101835+ goto out;
101836+ }
101837+
101838 no_nice = security_task_setnice(p, niceval);
101839 if (no_nice) {
101840 error = no_nice;
101841@@ -365,6 +371,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
101842 goto error;
101843 }
101844
101845+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
101846+ goto error;
101847+
101848+ if (!gid_eq(new->gid, old->gid)) {
101849+ /* make sure we generate a learn log for what will
101850+ end up being a role transition after a full-learning
101851+ policy is generated
101852+ CAP_SETGID is required to perform a transition
101853+ we may not log a CAP_SETGID check above, e.g.
101854+ in the case where new rgid = old egid
101855+ */
101856+ gr_learn_cap(current, new, CAP_SETGID);
101857+ }
101858+
101859 if (rgid != (gid_t) -1 ||
101860 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
101861 new->sgid = new->egid;
101862@@ -400,6 +420,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
101863 old = current_cred();
101864
101865 retval = -EPERM;
101866+
101867+ if (gr_check_group_change(kgid, kgid, kgid))
101868+ goto error;
101869+
101870 if (ns_capable(old->user_ns, CAP_SETGID))
101871 new->gid = new->egid = new->sgid = new->fsgid = kgid;
101872 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
101873@@ -417,7 +441,7 @@ error:
101874 /*
101875 * change the user struct in a credentials set to match the new UID
101876 */
101877-static int set_user(struct cred *new)
101878+int set_user(struct cred *new)
101879 {
101880 struct user_struct *new_user;
101881
101882@@ -497,7 +521,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
101883 goto error;
101884 }
101885
101886+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
101887+ goto error;
101888+
101889 if (!uid_eq(new->uid, old->uid)) {
101890+ /* make sure we generate a learn log for what will
101891+ end up being a role transition after a full-learning
101892+ policy is generated
101893+ CAP_SETUID is required to perform a transition
101894+ we may not log a CAP_SETUID check above, e.g.
101895+ in the case where new ruid = old euid
101896+ */
101897+ gr_learn_cap(current, new, CAP_SETUID);
101898 retval = set_user(new);
101899 if (retval < 0)
101900 goto error;
101901@@ -547,6 +582,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
101902 old = current_cred();
101903
101904 retval = -EPERM;
101905+
101906+ if (gr_check_crash_uid(kuid))
101907+ goto error;
101908+ if (gr_check_user_change(kuid, kuid, kuid))
101909+ goto error;
101910+
101911 if (ns_capable(old->user_ns, CAP_SETUID)) {
101912 new->suid = new->uid = kuid;
101913 if (!uid_eq(kuid, old->uid)) {
101914@@ -616,6 +657,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
101915 goto error;
101916 }
101917
101918+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
101919+ goto error;
101920+
101921 if (ruid != (uid_t) -1) {
101922 new->uid = kruid;
101923 if (!uid_eq(kruid, old->uid)) {
101924@@ -700,6 +744,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
101925 goto error;
101926 }
101927
101928+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
101929+ goto error;
101930+
101931 if (rgid != (gid_t) -1)
101932 new->gid = krgid;
101933 if (egid != (gid_t) -1)
101934@@ -764,12 +811,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
101935 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
101936 ns_capable(old->user_ns, CAP_SETUID)) {
101937 if (!uid_eq(kuid, old->fsuid)) {
101938+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
101939+ goto error;
101940+
101941 new->fsuid = kuid;
101942 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
101943 goto change_okay;
101944 }
101945 }
101946
101947+error:
101948 abort_creds(new);
101949 return old_fsuid;
101950
101951@@ -802,12 +853,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
101952 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
101953 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
101954 ns_capable(old->user_ns, CAP_SETGID)) {
101955+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
101956+ goto error;
101957+
101958 if (!gid_eq(kgid, old->fsgid)) {
101959 new->fsgid = kgid;
101960 goto change_okay;
101961 }
101962 }
101963
101964+error:
101965 abort_creds(new);
101966 return old_fsgid;
101967
101968@@ -1185,19 +1240,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
101969 return -EFAULT;
101970
101971 down_read(&uts_sem);
101972- error = __copy_to_user(&name->sysname, &utsname()->sysname,
101973+ error = __copy_to_user(name->sysname, &utsname()->sysname,
101974 __OLD_UTS_LEN);
101975 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
101976- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
101977+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
101978 __OLD_UTS_LEN);
101979 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
101980- error |= __copy_to_user(&name->release, &utsname()->release,
101981+ error |= __copy_to_user(name->release, &utsname()->release,
101982 __OLD_UTS_LEN);
101983 error |= __put_user(0, name->release + __OLD_UTS_LEN);
101984- error |= __copy_to_user(&name->version, &utsname()->version,
101985+ error |= __copy_to_user(name->version, &utsname()->version,
101986 __OLD_UTS_LEN);
101987 error |= __put_user(0, name->version + __OLD_UTS_LEN);
101988- error |= __copy_to_user(&name->machine, &utsname()->machine,
101989+ error |= __copy_to_user(name->machine, &utsname()->machine,
101990 __OLD_UTS_LEN);
101991 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
101992 up_read(&uts_sem);
101993@@ -1398,6 +1453,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
101994 */
101995 new_rlim->rlim_cur = 1;
101996 }
101997+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
101998+ is changed to a lower value. Since tasks can be created by the same
101999+ user in between this limit change and an execve by this task, force
102000+ a recheck only for this task by setting PF_NPROC_EXCEEDED
102001+ */
102002+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
102003+ tsk->flags |= PF_NPROC_EXCEEDED;
102004 }
102005 if (!retval) {
102006 if (old_rlim)
102007diff --git a/kernel/sysctl.c b/kernel/sysctl.c
102008index ce410bb..cd276f0 100644
102009--- a/kernel/sysctl.c
102010+++ b/kernel/sysctl.c
102011@@ -94,7 +94,6 @@
102012
102013
102014 #if defined(CONFIG_SYSCTL)
102015-
102016 /* External variables not in a header file. */
102017 extern int max_threads;
102018 extern int suid_dumpable;
102019@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
102020
102021 /* Constants used for minimum and maximum */
102022 #ifdef CONFIG_LOCKUP_DETECTOR
102023-static int sixty = 60;
102024+static int sixty __read_only = 60;
102025 #endif
102026
102027-static int __maybe_unused neg_one = -1;
102028+static int __maybe_unused neg_one __read_only = -1;
102029
102030-static int zero;
102031-static int __maybe_unused one = 1;
102032-static int __maybe_unused two = 2;
102033-static int __maybe_unused four = 4;
102034-static unsigned long one_ul = 1;
102035-static int one_hundred = 100;
102036+static int zero __read_only = 0;
102037+static int __maybe_unused one __read_only = 1;
102038+static int __maybe_unused two __read_only = 2;
102039+static int __maybe_unused three __read_only = 3;
102040+static int __maybe_unused four __read_only = 4;
102041+static unsigned long one_ul __read_only = 1;
102042+static int one_hundred __read_only = 100;
102043 #ifdef CONFIG_PRINTK
102044-static int ten_thousand = 10000;
102045+static int ten_thousand __read_only = 10000;
102046 #endif
102047
102048 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
102049@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
102050 void __user *buffer, size_t *lenp, loff_t *ppos);
102051 #endif
102052
102053-#ifdef CONFIG_PRINTK
102054 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
102055 void __user *buffer, size_t *lenp, loff_t *ppos);
102056-#endif
102057
102058 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
102059 void __user *buffer, size_t *lenp, loff_t *ppos);
102060@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
102061
102062 #endif
102063
102064+extern struct ctl_table grsecurity_table[];
102065+
102066 static struct ctl_table kern_table[];
102067 static struct ctl_table vm_table[];
102068 static struct ctl_table fs_table[];
102069@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
102070 int sysctl_legacy_va_layout;
102071 #endif
102072
102073+#ifdef CONFIG_PAX_SOFTMODE
102074+static struct ctl_table pax_table[] = {
102075+ {
102076+ .procname = "softmode",
102077+ .data = &pax_softmode,
102078+ .maxlen = sizeof(unsigned int),
102079+ .mode = 0600,
102080+ .proc_handler = &proc_dointvec,
102081+ },
102082+
102083+ { }
102084+};
102085+#endif
102086+
102087 /* The default sysctl tables: */
102088
102089 static struct ctl_table sysctl_base_table[] = {
102090@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
102091 #endif
102092
102093 static struct ctl_table kern_table[] = {
102094+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
102095+ {
102096+ .procname = "grsecurity",
102097+ .mode = 0500,
102098+ .child = grsecurity_table,
102099+ },
102100+#endif
102101+
102102+#ifdef CONFIG_PAX_SOFTMODE
102103+ {
102104+ .procname = "pax",
102105+ .mode = 0500,
102106+ .child = pax_table,
102107+ },
102108+#endif
102109+
102110 {
102111 .procname = "sched_child_runs_first",
102112 .data = &sysctl_sched_child_runs_first,
102113@@ -649,7 +679,7 @@ static struct ctl_table kern_table[] = {
102114 .data = &modprobe_path,
102115 .maxlen = KMOD_PATH_LEN,
102116 .mode = 0644,
102117- .proc_handler = proc_dostring,
102118+ .proc_handler = proc_dostring_modpriv,
102119 },
102120 {
102121 .procname = "modules_disabled",
102122@@ -816,16 +846,20 @@ static struct ctl_table kern_table[] = {
102123 .extra1 = &zero,
102124 .extra2 = &one,
102125 },
102126+#endif
102127 {
102128 .procname = "kptr_restrict",
102129 .data = &kptr_restrict,
102130 .maxlen = sizeof(int),
102131 .mode = 0644,
102132 .proc_handler = proc_dointvec_minmax_sysadmin,
102133+#ifdef CONFIG_GRKERNSEC_HIDESYM
102134+ .extra1 = &two,
102135+#else
102136 .extra1 = &zero,
102137+#endif
102138 .extra2 = &two,
102139 },
102140-#endif
102141 {
102142 .procname = "ngroups_max",
102143 .data = &ngroups_max,
102144@@ -1072,10 +1106,17 @@ static struct ctl_table kern_table[] = {
102145 */
102146 {
102147 .procname = "perf_event_paranoid",
102148- .data = &sysctl_perf_event_paranoid,
102149- .maxlen = sizeof(sysctl_perf_event_paranoid),
102150+ .data = &sysctl_perf_event_legitimately_concerned,
102151+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
102152 .mode = 0644,
102153- .proc_handler = proc_dointvec,
102154+ /* go ahead, be a hero */
102155+ .proc_handler = proc_dointvec_minmax_sysadmin,
102156+ .extra1 = &neg_one,
102157+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
102158+ .extra2 = &three,
102159+#else
102160+ .extra2 = &two,
102161+#endif
102162 },
102163 {
102164 .procname = "perf_event_mlock_kb",
102165@@ -1348,6 +1389,13 @@ static struct ctl_table vm_table[] = {
102166 .proc_handler = proc_dointvec_minmax,
102167 .extra1 = &zero,
102168 },
102169+ {
102170+ .procname = "heap_stack_gap",
102171+ .data = &sysctl_heap_stack_gap,
102172+ .maxlen = sizeof(sysctl_heap_stack_gap),
102173+ .mode = 0644,
102174+ .proc_handler = proc_doulongvec_minmax,
102175+ },
102176 #else
102177 {
102178 .procname = "nr_trim_pages",
102179@@ -1830,6 +1878,16 @@ int proc_dostring(struct ctl_table *table, int write,
102180 (char __user *)buffer, lenp, ppos);
102181 }
102182
102183+int proc_dostring_modpriv(struct ctl_table *table, int write,
102184+ void __user *buffer, size_t *lenp, loff_t *ppos)
102185+{
102186+ if (write && !capable(CAP_SYS_MODULE))
102187+ return -EPERM;
102188+
102189+ return _proc_do_string(table->data, table->maxlen, write,
102190+ buffer, lenp, ppos);
102191+}
102192+
102193 static size_t proc_skip_spaces(char **buf)
102194 {
102195 size_t ret;
102196@@ -1935,6 +1993,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
102197 len = strlen(tmp);
102198 if (len > *size)
102199 len = *size;
102200+ if (len > sizeof(tmp))
102201+ len = sizeof(tmp);
102202 if (copy_to_user(*buf, tmp, len))
102203 return -EFAULT;
102204 *size -= len;
102205@@ -2112,7 +2172,7 @@ int proc_dointvec(struct ctl_table *table, int write,
102206 static int proc_taint(struct ctl_table *table, int write,
102207 void __user *buffer, size_t *lenp, loff_t *ppos)
102208 {
102209- struct ctl_table t;
102210+ ctl_table_no_const t;
102211 unsigned long tmptaint = get_taint();
102212 int err;
102213
102214@@ -2140,7 +2200,6 @@ static int proc_taint(struct ctl_table *table, int write,
102215 return err;
102216 }
102217
102218-#ifdef CONFIG_PRINTK
102219 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
102220 void __user *buffer, size_t *lenp, loff_t *ppos)
102221 {
102222@@ -2149,7 +2208,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
102223
102224 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
102225 }
102226-#endif
102227
102228 struct do_proc_dointvec_minmax_conv_param {
102229 int *min;
102230@@ -2709,6 +2767,12 @@ int proc_dostring(struct ctl_table *table, int write,
102231 return -ENOSYS;
102232 }
102233
102234+int proc_dostring_modpriv(struct ctl_table *table, int write,
102235+ void __user *buffer, size_t *lenp, loff_t *ppos)
102236+{
102237+ return -ENOSYS;
102238+}
102239+
102240 int proc_dointvec(struct ctl_table *table, int write,
102241 void __user *buffer, size_t *lenp, loff_t *ppos)
102242 {
102243@@ -2765,5 +2829,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
102244 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
102245 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
102246 EXPORT_SYMBOL(proc_dostring);
102247+EXPORT_SYMBOL(proc_dostring_modpriv);
102248 EXPORT_SYMBOL(proc_doulongvec_minmax);
102249 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
102250diff --git a/kernel/taskstats.c b/kernel/taskstats.c
102251index 21f82c2..c1984e5 100644
102252--- a/kernel/taskstats.c
102253+++ b/kernel/taskstats.c
102254@@ -28,9 +28,12 @@
102255 #include <linux/fs.h>
102256 #include <linux/file.h>
102257 #include <linux/pid_namespace.h>
102258+#include <linux/grsecurity.h>
102259 #include <net/genetlink.h>
102260 #include <linux/atomic.h>
102261
102262+extern int gr_is_taskstats_denied(int pid);
102263+
102264 /*
102265 * Maximum length of a cpumask that can be specified in
102266 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
102267@@ -567,6 +570,9 @@ err:
102268
102269 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
102270 {
102271+ if (gr_is_taskstats_denied(current->pid))
102272+ return -EACCES;
102273+
102274 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
102275 return cmd_attr_register_cpumask(info);
102276 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
102277diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
102278index 1b001ed..55ef9e4 100644
102279--- a/kernel/time/alarmtimer.c
102280+++ b/kernel/time/alarmtimer.c
102281@@ -823,7 +823,7 @@ static int __init alarmtimer_init(void)
102282 struct platform_device *pdev;
102283 int error = 0;
102284 int i;
102285- struct k_clock alarm_clock = {
102286+ static struct k_clock alarm_clock = {
102287 .clock_getres = alarm_clock_getres,
102288 .clock_get = alarm_clock_get,
102289 .timer_create = alarm_timer_create,
102290diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
102291index 38f586c..14386a7 100644
102292--- a/kernel/time/hrtimer.c
102293+++ b/kernel/time/hrtimer.c
102294@@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
102295 local_irq_restore(flags);
102296 }
102297
102298-static void run_hrtimer_softirq(struct softirq_action *h)
102299+static __latent_entropy void run_hrtimer_softirq(void)
102300 {
102301 hrtimer_peek_ahead_timers();
102302 }
102303diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
102304index 0075da7..63cc872 100644
102305--- a/kernel/time/posix-cpu-timers.c
102306+++ b/kernel/time/posix-cpu-timers.c
102307@@ -1449,14 +1449,14 @@ struct k_clock clock_posix_cpu = {
102308
102309 static __init int init_posix_cpu_timers(void)
102310 {
102311- struct k_clock process = {
102312+ static struct k_clock process = {
102313 .clock_getres = process_cpu_clock_getres,
102314 .clock_get = process_cpu_clock_get,
102315 .timer_create = process_cpu_timer_create,
102316 .nsleep = process_cpu_nsleep,
102317 .nsleep_restart = process_cpu_nsleep_restart,
102318 };
102319- struct k_clock thread = {
102320+ static struct k_clock thread = {
102321 .clock_getres = thread_cpu_clock_getres,
102322 .clock_get = thread_cpu_clock_get,
102323 .timer_create = thread_cpu_timer_create,
102324diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
102325index 31ea01f..7fc61ef 100644
102326--- a/kernel/time/posix-timers.c
102327+++ b/kernel/time/posix-timers.c
102328@@ -43,6 +43,7 @@
102329 #include <linux/hash.h>
102330 #include <linux/posix-clock.h>
102331 #include <linux/posix-timers.h>
102332+#include <linux/grsecurity.h>
102333 #include <linux/syscalls.h>
102334 #include <linux/wait.h>
102335 #include <linux/workqueue.h>
102336@@ -124,7 +125,7 @@ static DEFINE_SPINLOCK(hash_lock);
102337 * which we beg off on and pass to do_sys_settimeofday().
102338 */
102339
102340-static struct k_clock posix_clocks[MAX_CLOCKS];
102341+static struct k_clock *posix_clocks[MAX_CLOCKS];
102342
102343 /*
102344 * These ones are defined below.
102345@@ -277,7 +278,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
102346 */
102347 static __init int init_posix_timers(void)
102348 {
102349- struct k_clock clock_realtime = {
102350+ static struct k_clock clock_realtime = {
102351 .clock_getres = hrtimer_get_res,
102352 .clock_get = posix_clock_realtime_get,
102353 .clock_set = posix_clock_realtime_set,
102354@@ -289,7 +290,7 @@ static __init int init_posix_timers(void)
102355 .timer_get = common_timer_get,
102356 .timer_del = common_timer_del,
102357 };
102358- struct k_clock clock_monotonic = {
102359+ static struct k_clock clock_monotonic = {
102360 .clock_getres = hrtimer_get_res,
102361 .clock_get = posix_ktime_get_ts,
102362 .nsleep = common_nsleep,
102363@@ -299,19 +300,19 @@ static __init int init_posix_timers(void)
102364 .timer_get = common_timer_get,
102365 .timer_del = common_timer_del,
102366 };
102367- struct k_clock clock_monotonic_raw = {
102368+ static struct k_clock clock_monotonic_raw = {
102369 .clock_getres = hrtimer_get_res,
102370 .clock_get = posix_get_monotonic_raw,
102371 };
102372- struct k_clock clock_realtime_coarse = {
102373+ static struct k_clock clock_realtime_coarse = {
102374 .clock_getres = posix_get_coarse_res,
102375 .clock_get = posix_get_realtime_coarse,
102376 };
102377- struct k_clock clock_monotonic_coarse = {
102378+ static struct k_clock clock_monotonic_coarse = {
102379 .clock_getres = posix_get_coarse_res,
102380 .clock_get = posix_get_monotonic_coarse,
102381 };
102382- struct k_clock clock_tai = {
102383+ static struct k_clock clock_tai = {
102384 .clock_getres = hrtimer_get_res,
102385 .clock_get = posix_get_tai,
102386 .nsleep = common_nsleep,
102387@@ -321,7 +322,7 @@ static __init int init_posix_timers(void)
102388 .timer_get = common_timer_get,
102389 .timer_del = common_timer_del,
102390 };
102391- struct k_clock clock_boottime = {
102392+ static struct k_clock clock_boottime = {
102393 .clock_getres = hrtimer_get_res,
102394 .clock_get = posix_get_boottime,
102395 .nsleep = common_nsleep,
102396@@ -533,7 +534,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
102397 return;
102398 }
102399
102400- posix_clocks[clock_id] = *new_clock;
102401+ posix_clocks[clock_id] = new_clock;
102402 }
102403 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
102404
102405@@ -579,9 +580,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
102406 return (id & CLOCKFD_MASK) == CLOCKFD ?
102407 &clock_posix_dynamic : &clock_posix_cpu;
102408
102409- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
102410+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
102411 return NULL;
102412- return &posix_clocks[id];
102413+ return posix_clocks[id];
102414 }
102415
102416 static int common_timer_create(struct k_itimer *new_timer)
102417@@ -599,7 +600,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
102418 struct k_clock *kc = clockid_to_kclock(which_clock);
102419 struct k_itimer *new_timer;
102420 int error, new_timer_id;
102421- sigevent_t event;
102422+ sigevent_t event = { };
102423 int it_id_set = IT_ID_NOT_SET;
102424
102425 if (!kc)
102426@@ -1014,6 +1015,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
102427 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
102428 return -EFAULT;
102429
102430+ /* only the CLOCK_REALTIME clock can be set, all other clocks
102431+ have their clock_set fptr set to a nosettime dummy function
102432+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
102433+ call common_clock_set, which calls do_sys_settimeofday, which
102434+ we hook
102435+ */
102436+
102437 return kc->clock_set(which_clock, &new_tp);
102438 }
102439
102440diff --git a/kernel/time/time.c b/kernel/time/time.c
102441index 2c85b77..6530536 100644
102442--- a/kernel/time/time.c
102443+++ b/kernel/time/time.c
102444@@ -173,6 +173,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
102445 return error;
102446
102447 if (tz) {
102448+ /* we log in do_settimeofday called below, so don't log twice
102449+ */
102450+ if (!tv)
102451+ gr_log_timechange();
102452+
102453 sys_tz = *tz;
102454 update_vsyscall_tz();
102455 if (firsttime) {
102456diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
102457index 91db941..a371671 100644
102458--- a/kernel/time/timekeeping.c
102459+++ b/kernel/time/timekeeping.c
102460@@ -15,6 +15,7 @@
102461 #include <linux/init.h>
102462 #include <linux/mm.h>
102463 #include <linux/sched.h>
102464+#include <linux/grsecurity.h>
102465 #include <linux/syscore_ops.h>
102466 #include <linux/clocksource.h>
102467 #include <linux/jiffies.h>
102468@@ -802,6 +803,8 @@ int do_settimeofday64(const struct timespec64 *ts)
102469 if (!timespec64_valid_strict(ts))
102470 return -EINVAL;
102471
102472+ gr_log_timechange();
102473+
102474 raw_spin_lock_irqsave(&timekeeper_lock, flags);
102475 write_seqcount_begin(&tk_core.seq);
102476
102477diff --git a/kernel/time/timer.c b/kernel/time/timer.c
102478index 2d3f5c5..7ed7dc5 100644
102479--- a/kernel/time/timer.c
102480+++ b/kernel/time/timer.c
102481@@ -1393,7 +1393,7 @@ void update_process_times(int user_tick)
102482 /*
102483 * This function runs timers and the timer-tq in bottom half context.
102484 */
102485-static void run_timer_softirq(struct softirq_action *h)
102486+static __latent_entropy void run_timer_softirq(void)
102487 {
102488 struct tvec_base *base = __this_cpu_read(tvec_bases);
102489
102490@@ -1456,7 +1456,7 @@ static void process_timeout(unsigned long __data)
102491 *
102492 * In all cases the return value is guaranteed to be non-negative.
102493 */
102494-signed long __sched schedule_timeout(signed long timeout)
102495+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
102496 {
102497 struct timer_list timer;
102498 unsigned long expire;
102499diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
102500index 61ed862..3b52c65 100644
102501--- a/kernel/time/timer_list.c
102502+++ b/kernel/time/timer_list.c
102503@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
102504
102505 static void print_name_offset(struct seq_file *m, void *sym)
102506 {
102507+#ifdef CONFIG_GRKERNSEC_HIDESYM
102508+ SEQ_printf(m, "<%p>", NULL);
102509+#else
102510 char symname[KSYM_NAME_LEN];
102511
102512 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
102513 SEQ_printf(m, "<%pK>", sym);
102514 else
102515 SEQ_printf(m, "%s", symname);
102516+#endif
102517 }
102518
102519 static void
102520@@ -119,7 +123,11 @@ next_one:
102521 static void
102522 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
102523 {
102524+#ifdef CONFIG_GRKERNSEC_HIDESYM
102525+ SEQ_printf(m, " .base: %p\n", NULL);
102526+#else
102527 SEQ_printf(m, " .base: %pK\n", base);
102528+#endif
102529 SEQ_printf(m, " .index: %d\n",
102530 base->index);
102531 SEQ_printf(m, " .resolution: %Lu nsecs\n",
102532@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
102533 {
102534 struct proc_dir_entry *pe;
102535
102536+#ifdef CONFIG_GRKERNSEC_PROC_ADD
102537+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
102538+#else
102539 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
102540+#endif
102541 if (!pe)
102542 return -ENOMEM;
102543 return 0;
102544diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
102545index 1fb08f2..ca4bb1e 100644
102546--- a/kernel/time/timer_stats.c
102547+++ b/kernel/time/timer_stats.c
102548@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
102549 static unsigned long nr_entries;
102550 static struct entry entries[MAX_ENTRIES];
102551
102552-static atomic_t overflow_count;
102553+static atomic_unchecked_t overflow_count;
102554
102555 /*
102556 * The entries are in a hash-table, for fast lookup:
102557@@ -140,7 +140,7 @@ static void reset_entries(void)
102558 nr_entries = 0;
102559 memset(entries, 0, sizeof(entries));
102560 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
102561- atomic_set(&overflow_count, 0);
102562+ atomic_set_unchecked(&overflow_count, 0);
102563 }
102564
102565 static struct entry *alloc_entry(void)
102566@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
102567 if (likely(entry))
102568 entry->count++;
102569 else
102570- atomic_inc(&overflow_count);
102571+ atomic_inc_unchecked(&overflow_count);
102572
102573 out_unlock:
102574 raw_spin_unlock_irqrestore(lock, flags);
102575@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
102576
102577 static void print_name_offset(struct seq_file *m, unsigned long addr)
102578 {
102579+#ifdef CONFIG_GRKERNSEC_HIDESYM
102580+ seq_printf(m, "<%p>", NULL);
102581+#else
102582 char symname[KSYM_NAME_LEN];
102583
102584 if (lookup_symbol_name(addr, symname) < 0)
102585- seq_printf(m, "<%p>", (void *)addr);
102586+ seq_printf(m, "<%pK>", (void *)addr);
102587 else
102588 seq_printf(m, "%s", symname);
102589+#endif
102590 }
102591
102592 static int tstats_show(struct seq_file *m, void *v)
102593@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
102594
102595 seq_puts(m, "Timer Stats Version: v0.3\n");
102596 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
102597- if (atomic_read(&overflow_count))
102598- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
102599+ if (atomic_read_unchecked(&overflow_count))
102600+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
102601 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
102602
102603 for (i = 0; i < nr_entries; i++) {
102604@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
102605 {
102606 struct proc_dir_entry *pe;
102607
102608+#ifdef CONFIG_GRKERNSEC_PROC_ADD
102609+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
102610+#else
102611 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
102612+#endif
102613 if (!pe)
102614 return -ENOMEM;
102615 return 0;
102616diff --git a/kernel/torture.c b/kernel/torture.c
102617index dd70993..0bf694b 100644
102618--- a/kernel/torture.c
102619+++ b/kernel/torture.c
102620@@ -482,7 +482,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
102621 mutex_lock(&fullstop_mutex);
102622 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
102623 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
102624- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
102625+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
102626 } else {
102627 pr_warn("Concurrent rmmod and shutdown illegal!\n");
102628 }
102629@@ -549,14 +549,14 @@ static int torture_stutter(void *arg)
102630 if (!torture_must_stop()) {
102631 if (stutter > 1) {
102632 schedule_timeout_interruptible(stutter - 1);
102633- ACCESS_ONCE(stutter_pause_test) = 2;
102634+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
102635 }
102636 schedule_timeout_interruptible(1);
102637- ACCESS_ONCE(stutter_pause_test) = 1;
102638+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
102639 }
102640 if (!torture_must_stop())
102641 schedule_timeout_interruptible(stutter);
102642- ACCESS_ONCE(stutter_pause_test) = 0;
102643+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
102644 torture_shutdown_absorb("torture_stutter");
102645 } while (!torture_must_stop());
102646 torture_kthread_stopping("torture_stutter");
102647@@ -648,7 +648,7 @@ bool torture_cleanup_begin(void)
102648 schedule_timeout_uninterruptible(10);
102649 return true;
102650 }
102651- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
102652+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
102653 mutex_unlock(&fullstop_mutex);
102654 torture_shutdown_cleanup();
102655 torture_shuffle_cleanup();
102656diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
102657index 483cecf..ac46091 100644
102658--- a/kernel/trace/blktrace.c
102659+++ b/kernel/trace/blktrace.c
102660@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
102661 struct blk_trace *bt = filp->private_data;
102662 char buf[16];
102663
102664- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
102665+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
102666
102667 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
102668 }
102669@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
102670 return 1;
102671
102672 bt = buf->chan->private_data;
102673- atomic_inc(&bt->dropped);
102674+ atomic_inc_unchecked(&bt->dropped);
102675 return 0;
102676 }
102677
102678@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
102679
102680 bt->dir = dir;
102681 bt->dev = dev;
102682- atomic_set(&bt->dropped, 0);
102683+ atomic_set_unchecked(&bt->dropped, 0);
102684 INIT_LIST_HEAD(&bt->running_list);
102685
102686 ret = -EIO;
102687diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
102688index 4f22802..bd268b1 100644
102689--- a/kernel/trace/ftrace.c
102690+++ b/kernel/trace/ftrace.c
102691@@ -2382,12 +2382,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
102692 if (unlikely(ftrace_disabled))
102693 return 0;
102694
102695+ ret = ftrace_arch_code_modify_prepare();
102696+ FTRACE_WARN_ON(ret);
102697+ if (ret)
102698+ return 0;
102699+
102700 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
102701+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
102702 if (ret) {
102703 ftrace_bug(ret, rec);
102704- return 0;
102705 }
102706- return 1;
102707+ return ret ? 0 : 1;
102708 }
102709
102710 /*
102711@@ -4776,8 +4781,10 @@ static int ftrace_process_locs(struct module *mod,
102712 if (!count)
102713 return 0;
102714
102715+ pax_open_kernel();
102716 sort(start, count, sizeof(*start),
102717 ftrace_cmp_ips, ftrace_swap_ips);
102718+ pax_close_kernel();
102719
102720 start_pg = ftrace_allocate_pages(count);
102721 if (!start_pg)
102722@@ -5653,7 +5660,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
102723
102724 if (t->ret_stack == NULL) {
102725 atomic_set(&t->tracing_graph_pause, 0);
102726- atomic_set(&t->trace_overrun, 0);
102727+ atomic_set_unchecked(&t->trace_overrun, 0);
102728 t->curr_ret_stack = -1;
102729 /* Make sure the tasks see the -1 first: */
102730 smp_wmb();
102731@@ -5876,7 +5883,7 @@ static void
102732 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
102733 {
102734 atomic_set(&t->tracing_graph_pause, 0);
102735- atomic_set(&t->trace_overrun, 0);
102736+ atomic_set_unchecked(&t->trace_overrun, 0);
102737 t->ftrace_timestamp = 0;
102738 /* make curr_ret_stack visible before we add the ret_stack */
102739 smp_wmb();
102740diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
102741index 922048a..bb71a55 100644
102742--- a/kernel/trace/ring_buffer.c
102743+++ b/kernel/trace/ring_buffer.c
102744@@ -348,9 +348,9 @@ struct buffer_data_page {
102745 */
102746 struct buffer_page {
102747 struct list_head list; /* list of buffer pages */
102748- local_t write; /* index for next write */
102749+ local_unchecked_t write; /* index for next write */
102750 unsigned read; /* index for next read */
102751- local_t entries; /* entries on this page */
102752+ local_unchecked_t entries; /* entries on this page */
102753 unsigned long real_end; /* real end of data */
102754 struct buffer_data_page *page; /* Actual data page */
102755 };
102756@@ -471,11 +471,11 @@ struct ring_buffer_per_cpu {
102757 unsigned long last_overrun;
102758 local_t entries_bytes;
102759 local_t entries;
102760- local_t overrun;
102761- local_t commit_overrun;
102762- local_t dropped_events;
102763+ local_unchecked_t overrun;
102764+ local_unchecked_t commit_overrun;
102765+ local_unchecked_t dropped_events;
102766 local_t committing;
102767- local_t commits;
102768+ local_unchecked_t commits;
102769 unsigned long read;
102770 unsigned long read_bytes;
102771 u64 write_stamp;
102772@@ -1045,8 +1045,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
102773 *
102774 * We add a counter to the write field to denote this.
102775 */
102776- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
102777- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
102778+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
102779+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
102780
102781 /*
102782 * Just make sure we have seen our old_write and synchronize
102783@@ -1074,8 +1074,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
102784 * cmpxchg to only update if an interrupt did not already
102785 * do it for us. If the cmpxchg fails, we don't care.
102786 */
102787- (void)local_cmpxchg(&next_page->write, old_write, val);
102788- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
102789+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
102790+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
102791
102792 /*
102793 * No need to worry about races with clearing out the commit.
102794@@ -1443,12 +1443,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
102795
102796 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
102797 {
102798- return local_read(&bpage->entries) & RB_WRITE_MASK;
102799+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
102800 }
102801
102802 static inline unsigned long rb_page_write(struct buffer_page *bpage)
102803 {
102804- return local_read(&bpage->write) & RB_WRITE_MASK;
102805+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
102806 }
102807
102808 static int
102809@@ -1543,7 +1543,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
102810 * bytes consumed in ring buffer from here.
102811 * Increment overrun to account for the lost events.
102812 */
102813- local_add(page_entries, &cpu_buffer->overrun);
102814+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
102815 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
102816 }
102817
102818@@ -2105,7 +2105,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
102819 * it is our responsibility to update
102820 * the counters.
102821 */
102822- local_add(entries, &cpu_buffer->overrun);
102823+ local_add_unchecked(entries, &cpu_buffer->overrun);
102824 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
102825
102826 /*
102827@@ -2255,7 +2255,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
102828 if (tail == BUF_PAGE_SIZE)
102829 tail_page->real_end = 0;
102830
102831- local_sub(length, &tail_page->write);
102832+ local_sub_unchecked(length, &tail_page->write);
102833 return;
102834 }
102835
102836@@ -2290,7 +2290,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
102837 rb_event_set_padding(event);
102838
102839 /* Set the write back to the previous setting */
102840- local_sub(length, &tail_page->write);
102841+ local_sub_unchecked(length, &tail_page->write);
102842 return;
102843 }
102844
102845@@ -2302,7 +2302,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
102846
102847 /* Set write to end of buffer */
102848 length = (tail + length) - BUF_PAGE_SIZE;
102849- local_sub(length, &tail_page->write);
102850+ local_sub_unchecked(length, &tail_page->write);
102851 }
102852
102853 /*
102854@@ -2328,7 +2328,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
102855 * about it.
102856 */
102857 if (unlikely(next_page == commit_page)) {
102858- local_inc(&cpu_buffer->commit_overrun);
102859+ local_inc_unchecked(&cpu_buffer->commit_overrun);
102860 goto out_reset;
102861 }
102862
102863@@ -2358,7 +2358,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
102864 * this is easy, just stop here.
102865 */
102866 if (!(buffer->flags & RB_FL_OVERWRITE)) {
102867- local_inc(&cpu_buffer->dropped_events);
102868+ local_inc_unchecked(&cpu_buffer->dropped_events);
102869 goto out_reset;
102870 }
102871
102872@@ -2384,7 +2384,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
102873 cpu_buffer->tail_page) &&
102874 (cpu_buffer->commit_page ==
102875 cpu_buffer->reader_page))) {
102876- local_inc(&cpu_buffer->commit_overrun);
102877+ local_inc_unchecked(&cpu_buffer->commit_overrun);
102878 goto out_reset;
102879 }
102880 }
102881@@ -2432,7 +2432,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
102882 length += RB_LEN_TIME_EXTEND;
102883
102884 tail_page = cpu_buffer->tail_page;
102885- write = local_add_return(length, &tail_page->write);
102886+ write = local_add_return_unchecked(length, &tail_page->write);
102887
102888 /* set write to only the index of the write */
102889 write &= RB_WRITE_MASK;
102890@@ -2456,7 +2456,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
102891 kmemcheck_annotate_bitfield(event, bitfield);
102892 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
102893
102894- local_inc(&tail_page->entries);
102895+ local_inc_unchecked(&tail_page->entries);
102896
102897 /*
102898 * If this is the first commit on the page, then update
102899@@ -2489,7 +2489,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
102900
102901 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
102902 unsigned long write_mask =
102903- local_read(&bpage->write) & ~RB_WRITE_MASK;
102904+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
102905 unsigned long event_length = rb_event_length(event);
102906 /*
102907 * This is on the tail page. It is possible that
102908@@ -2499,7 +2499,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
102909 */
102910 old_index += write_mask;
102911 new_index += write_mask;
102912- index = local_cmpxchg(&bpage->write, old_index, new_index);
102913+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
102914 if (index == old_index) {
102915 /* update counters */
102916 local_sub(event_length, &cpu_buffer->entries_bytes);
102917@@ -2514,7 +2514,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
102918 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
102919 {
102920 local_inc(&cpu_buffer->committing);
102921- local_inc(&cpu_buffer->commits);
102922+ local_inc_unchecked(&cpu_buffer->commits);
102923 }
102924
102925 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
102926@@ -2526,7 +2526,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
102927 return;
102928
102929 again:
102930- commits = local_read(&cpu_buffer->commits);
102931+ commits = local_read_unchecked(&cpu_buffer->commits);
102932 /* synchronize with interrupts */
102933 barrier();
102934 if (local_read(&cpu_buffer->committing) == 1)
102935@@ -2542,7 +2542,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
102936 * updating of the commit page and the clearing of the
102937 * committing counter.
102938 */
102939- if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
102940+ if (unlikely(local_read_unchecked(&cpu_buffer->commits) != commits) &&
102941 !local_read(&cpu_buffer->committing)) {
102942 local_inc(&cpu_buffer->committing);
102943 goto again;
102944@@ -2572,7 +2572,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
102945 barrier();
102946 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
102947 local_dec(&cpu_buffer->committing);
102948- local_dec(&cpu_buffer->commits);
102949+ local_dec_unchecked(&cpu_buffer->commits);
102950 return NULL;
102951 }
102952 #endif
102953@@ -2901,7 +2901,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
102954
102955 /* Do the likely case first */
102956 if (likely(bpage->page == (void *)addr)) {
102957- local_dec(&bpage->entries);
102958+ local_dec_unchecked(&bpage->entries);
102959 return;
102960 }
102961
102962@@ -2913,7 +2913,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
102963 start = bpage;
102964 do {
102965 if (bpage->page == (void *)addr) {
102966- local_dec(&bpage->entries);
102967+ local_dec_unchecked(&bpage->entries);
102968 return;
102969 }
102970 rb_inc_page(cpu_buffer, &bpage);
102971@@ -3197,7 +3197,7 @@ static inline unsigned long
102972 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
102973 {
102974 return local_read(&cpu_buffer->entries) -
102975- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
102976+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
102977 }
102978
102979 /**
102980@@ -3286,7 +3286,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
102981 return 0;
102982
102983 cpu_buffer = buffer->buffers[cpu];
102984- ret = local_read(&cpu_buffer->overrun);
102985+ ret = local_read_unchecked(&cpu_buffer->overrun);
102986
102987 return ret;
102988 }
102989@@ -3309,7 +3309,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
102990 return 0;
102991
102992 cpu_buffer = buffer->buffers[cpu];
102993- ret = local_read(&cpu_buffer->commit_overrun);
102994+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
102995
102996 return ret;
102997 }
102998@@ -3331,7 +3331,7 @@ ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
102999 return 0;
103000
103001 cpu_buffer = buffer->buffers[cpu];
103002- ret = local_read(&cpu_buffer->dropped_events);
103003+ ret = local_read_unchecked(&cpu_buffer->dropped_events);
103004
103005 return ret;
103006 }
103007@@ -3394,7 +3394,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
103008 /* if you care about this being correct, lock the buffer */
103009 for_each_buffer_cpu(buffer, cpu) {
103010 cpu_buffer = buffer->buffers[cpu];
103011- overruns += local_read(&cpu_buffer->overrun);
103012+ overruns += local_read_unchecked(&cpu_buffer->overrun);
103013 }
103014
103015 return overruns;
103016@@ -3565,8 +3565,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
103017 /*
103018 * Reset the reader page to size zero.
103019 */
103020- local_set(&cpu_buffer->reader_page->write, 0);
103021- local_set(&cpu_buffer->reader_page->entries, 0);
103022+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
103023+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
103024 local_set(&cpu_buffer->reader_page->page->commit, 0);
103025 cpu_buffer->reader_page->real_end = 0;
103026
103027@@ -3600,7 +3600,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
103028 * want to compare with the last_overrun.
103029 */
103030 smp_mb();
103031- overwrite = local_read(&(cpu_buffer->overrun));
103032+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
103033
103034 /*
103035 * Here's the tricky part.
103036@@ -4172,8 +4172,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
103037
103038 cpu_buffer->head_page
103039 = list_entry(cpu_buffer->pages, struct buffer_page, list);
103040- local_set(&cpu_buffer->head_page->write, 0);
103041- local_set(&cpu_buffer->head_page->entries, 0);
103042+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
103043+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
103044 local_set(&cpu_buffer->head_page->page->commit, 0);
103045
103046 cpu_buffer->head_page->read = 0;
103047@@ -4183,18 +4183,18 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
103048
103049 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
103050 INIT_LIST_HEAD(&cpu_buffer->new_pages);
103051- local_set(&cpu_buffer->reader_page->write, 0);
103052- local_set(&cpu_buffer->reader_page->entries, 0);
103053+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
103054+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
103055 local_set(&cpu_buffer->reader_page->page->commit, 0);
103056 cpu_buffer->reader_page->read = 0;
103057
103058 local_set(&cpu_buffer->entries_bytes, 0);
103059- local_set(&cpu_buffer->overrun, 0);
103060- local_set(&cpu_buffer->commit_overrun, 0);
103061- local_set(&cpu_buffer->dropped_events, 0);
103062+ local_set_unchecked(&cpu_buffer->overrun, 0);
103063+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
103064+ local_set_unchecked(&cpu_buffer->dropped_events, 0);
103065 local_set(&cpu_buffer->entries, 0);
103066 local_set(&cpu_buffer->committing, 0);
103067- local_set(&cpu_buffer->commits, 0);
103068+ local_set_unchecked(&cpu_buffer->commits, 0);
103069 cpu_buffer->read = 0;
103070 cpu_buffer->read_bytes = 0;
103071
103072@@ -4595,8 +4595,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
103073 rb_init_page(bpage);
103074 bpage = reader->page;
103075 reader->page = *data_page;
103076- local_set(&reader->write, 0);
103077- local_set(&reader->entries, 0);
103078+ local_set_unchecked(&reader->write, 0);
103079+ local_set_unchecked(&reader->entries, 0);
103080 reader->read = 0;
103081 *data_page = bpage;
103082
103083diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
103084index 62c6506..5c25989 100644
103085--- a/kernel/trace/trace.c
103086+++ b/kernel/trace/trace.c
103087@@ -3500,7 +3500,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
103088 return 0;
103089 }
103090
103091-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
103092+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
103093 {
103094 /* do nothing if flag is already set */
103095 if (!!(trace_flags & mask) == !!enabled)
103096diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
103097index dd8205a..1aae87a 100644
103098--- a/kernel/trace/trace.h
103099+++ b/kernel/trace/trace.h
103100@@ -1271,7 +1271,7 @@ extern const char *__stop___tracepoint_str[];
103101 void trace_printk_init_buffers(void);
103102 void trace_printk_start_comm(void);
103103 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
103104-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
103105+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
103106
103107 /*
103108 * Normal trace_printk() and friends allocates special buffers
103109diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
103110index 57b67b1..66082a9 100644
103111--- a/kernel/trace/trace_clock.c
103112+++ b/kernel/trace/trace_clock.c
103113@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
103114 return now;
103115 }
103116
103117-static atomic64_t trace_counter;
103118+static atomic64_unchecked_t trace_counter;
103119
103120 /*
103121 * trace_clock_counter(): simply an atomic counter.
103122@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
103123 */
103124 u64 notrace trace_clock_counter(void)
103125 {
103126- return atomic64_add_return(1, &trace_counter);
103127+ return atomic64_inc_return_unchecked(&trace_counter);
103128 }
103129diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
103130index a9c10a3..1864f6b 100644
103131--- a/kernel/trace/trace_events.c
103132+++ b/kernel/trace/trace_events.c
103133@@ -1762,7 +1762,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
103134 return 0;
103135 }
103136
103137-struct ftrace_module_file_ops;
103138 static void __add_event_to_tracers(struct ftrace_event_call *call);
103139
103140 /* Add an additional event_call dynamically */
103141diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
103142index 7f2e97c..085a257 100644
103143--- a/kernel/trace/trace_events_filter.c
103144+++ b/kernel/trace/trace_events_filter.c
103145@@ -1056,6 +1056,9 @@ static void parse_init(struct filter_parse_state *ps,
103146
103147 static char infix_next(struct filter_parse_state *ps)
103148 {
103149+ if (!ps->infix.cnt)
103150+ return 0;
103151+
103152 ps->infix.cnt--;
103153
103154 return ps->infix.string[ps->infix.tail++];
103155@@ -1071,6 +1074,9 @@ static char infix_peek(struct filter_parse_state *ps)
103156
103157 static void infix_advance(struct filter_parse_state *ps)
103158 {
103159+ if (!ps->infix.cnt)
103160+ return;
103161+
103162 ps->infix.cnt--;
103163 ps->infix.tail++;
103164 }
103165diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
103166index b6fce36..d9f11a3 100644
103167--- a/kernel/trace/trace_functions_graph.c
103168+++ b/kernel/trace/trace_functions_graph.c
103169@@ -133,7 +133,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
103170
103171 /* The return trace stack is full */
103172 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
103173- atomic_inc(&current->trace_overrun);
103174+ atomic_inc_unchecked(&current->trace_overrun);
103175 return -EBUSY;
103176 }
103177
103178@@ -230,7 +230,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
103179 *ret = current->ret_stack[index].ret;
103180 trace->func = current->ret_stack[index].func;
103181 trace->calltime = current->ret_stack[index].calltime;
103182- trace->overrun = atomic_read(&current->trace_overrun);
103183+ trace->overrun = atomic_read_unchecked(&current->trace_overrun);
103184 trace->depth = index;
103185 }
103186
103187diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
103188index 7a9ba62..2e0e4a1 100644
103189--- a/kernel/trace/trace_mmiotrace.c
103190+++ b/kernel/trace/trace_mmiotrace.c
103191@@ -24,7 +24,7 @@ struct header_iter {
103192 static struct trace_array *mmio_trace_array;
103193 static bool overrun_detected;
103194 static unsigned long prev_overruns;
103195-static atomic_t dropped_count;
103196+static atomic_unchecked_t dropped_count;
103197
103198 static void mmio_reset_data(struct trace_array *tr)
103199 {
103200@@ -124,7 +124,7 @@ static void mmio_close(struct trace_iterator *iter)
103201
103202 static unsigned long count_overruns(struct trace_iterator *iter)
103203 {
103204- unsigned long cnt = atomic_xchg(&dropped_count, 0);
103205+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
103206 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
103207
103208 if (over > prev_overruns)
103209@@ -307,7 +307,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
103210 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
103211 sizeof(*entry), 0, pc);
103212 if (!event) {
103213- atomic_inc(&dropped_count);
103214+ atomic_inc_unchecked(&dropped_count);
103215 return;
103216 }
103217 entry = ring_buffer_event_data(event);
103218@@ -337,7 +337,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
103219 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
103220 sizeof(*entry), 0, pc);
103221 if (!event) {
103222- atomic_inc(&dropped_count);
103223+ atomic_inc_unchecked(&dropped_count);
103224 return;
103225 }
103226 entry = ring_buffer_event_data(event);
103227diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
103228index 692bf71..6d9a9cd 100644
103229--- a/kernel/trace/trace_output.c
103230+++ b/kernel/trace/trace_output.c
103231@@ -751,14 +751,16 @@ int register_ftrace_event(struct trace_event *event)
103232 goto out;
103233 }
103234
103235+ pax_open_kernel();
103236 if (event->funcs->trace == NULL)
103237- event->funcs->trace = trace_nop_print;
103238+ *(void **)&event->funcs->trace = trace_nop_print;
103239 if (event->funcs->raw == NULL)
103240- event->funcs->raw = trace_nop_print;
103241+ *(void **)&event->funcs->raw = trace_nop_print;
103242 if (event->funcs->hex == NULL)
103243- event->funcs->hex = trace_nop_print;
103244+ *(void **)&event->funcs->hex = trace_nop_print;
103245 if (event->funcs->binary == NULL)
103246- event->funcs->binary = trace_nop_print;
103247+ *(void **)&event->funcs->binary = trace_nop_print;
103248+ pax_close_kernel();
103249
103250 key = event->type & (EVENT_HASHSIZE - 1);
103251
103252diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
103253index e694c9f..6775a38 100644
103254--- a/kernel/trace/trace_seq.c
103255+++ b/kernel/trace/trace_seq.c
103256@@ -337,7 +337,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
103257 return 0;
103258 }
103259
103260- seq_buf_path(&s->seq, path, "\n");
103261+ seq_buf_path(&s->seq, path, "\n\\");
103262
103263 if (unlikely(seq_buf_has_overflowed(&s->seq))) {
103264 s->seq.len = save_len;
103265diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
103266index c3e4fcf..ef6cc43 100644
103267--- a/kernel/trace/trace_stack.c
103268+++ b/kernel/trace/trace_stack.c
103269@@ -88,7 +88,7 @@ check_stack(unsigned long ip, unsigned long *stack)
103270 return;
103271
103272 /* we do not handle interrupt stacks yet */
103273- if (!object_is_on_stack(stack))
103274+ if (!object_starts_on_stack(stack))
103275 return;
103276
103277 local_irq_save(flags);
103278diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
103279index f97f6e3..d367b48 100644
103280--- a/kernel/trace/trace_syscalls.c
103281+++ b/kernel/trace/trace_syscalls.c
103282@@ -590,6 +590,8 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
103283 int num;
103284
103285 num = ((struct syscall_metadata *)call->data)->syscall_nr;
103286+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
103287+ return -EINVAL;
103288
103289 mutex_lock(&syscall_trace_lock);
103290 if (!sys_perf_refcount_enter)
103291@@ -610,6 +612,8 @@ static void perf_sysenter_disable(struct ftrace_event_call *call)
103292 int num;
103293
103294 num = ((struct syscall_metadata *)call->data)->syscall_nr;
103295+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
103296+ return;
103297
103298 mutex_lock(&syscall_trace_lock);
103299 sys_perf_refcount_enter--;
103300@@ -662,6 +666,8 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
103301 int num;
103302
103303 num = ((struct syscall_metadata *)call->data)->syscall_nr;
103304+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
103305+ return -EINVAL;
103306
103307 mutex_lock(&syscall_trace_lock);
103308 if (!sys_perf_refcount_exit)
103309@@ -682,6 +688,8 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
103310 int num;
103311
103312 num = ((struct syscall_metadata *)call->data)->syscall_nr;
103313+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
103314+ return;
103315
103316 mutex_lock(&syscall_trace_lock);
103317 sys_perf_refcount_exit--;
103318diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
103319index 4109f83..fe1f830 100644
103320--- a/kernel/user_namespace.c
103321+++ b/kernel/user_namespace.c
103322@@ -83,6 +83,21 @@ int create_user_ns(struct cred *new)
103323 !kgid_has_mapping(parent_ns, group))
103324 return -EPERM;
103325
103326+#ifdef CONFIG_GRKERNSEC
103327+ /*
103328+ * This doesn't really inspire confidence:
103329+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
103330+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
103331+ * Increases kernel attack surface in areas developers
103332+ * previously cared little about ("low importance due
103333+ * to requiring "root" capability")
103334+ * To be removed when this code receives *proper* review
103335+ */
103336+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
103337+ !capable(CAP_SETGID))
103338+ return -EPERM;
103339+#endif
103340+
103341 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
103342 if (!ns)
103343 return -ENOMEM;
103344@@ -980,7 +995,7 @@ static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
103345 if (atomic_read(&current->mm->mm_users) > 1)
103346 return -EINVAL;
103347
103348- if (current->fs->users != 1)
103349+ if (atomic_read(&current->fs->users) != 1)
103350 return -EINVAL;
103351
103352 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
103353diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
103354index c8eac43..4b5f08f 100644
103355--- a/kernel/utsname_sysctl.c
103356+++ b/kernel/utsname_sysctl.c
103357@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
103358 static int proc_do_uts_string(struct ctl_table *table, int write,
103359 void __user *buffer, size_t *lenp, loff_t *ppos)
103360 {
103361- struct ctl_table uts_table;
103362+ ctl_table_no_const uts_table;
103363 int r;
103364 memcpy(&uts_table, table, sizeof(uts_table));
103365 uts_table.data = get_uts(table, write);
103366diff --git a/kernel/watchdog.c b/kernel/watchdog.c
103367index 3174bf8..3553520 100644
103368--- a/kernel/watchdog.c
103369+++ b/kernel/watchdog.c
103370@@ -572,7 +572,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
103371 static void watchdog_nmi_disable(unsigned int cpu) { return; }
103372 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
103373
103374-static struct smp_hotplug_thread watchdog_threads = {
103375+static struct smp_hotplug_thread watchdog_threads __read_only = {
103376 .store = &softlockup_watchdog,
103377 .thread_should_run = watchdog_should_run,
103378 .thread_fn = watchdog,
103379diff --git a/kernel/workqueue.c b/kernel/workqueue.c
103380index 41ff75b..5ad683a 100644
103381--- a/kernel/workqueue.c
103382+++ b/kernel/workqueue.c
103383@@ -4564,7 +4564,7 @@ static void rebind_workers(struct worker_pool *pool)
103384 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
103385 worker_flags |= WORKER_REBOUND;
103386 worker_flags &= ~WORKER_UNBOUND;
103387- ACCESS_ONCE(worker->flags) = worker_flags;
103388+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
103389 }
103390
103391 spin_unlock_irq(&pool->lock);
103392diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
103393index c5cefb3..a4241e3 100644
103394--- a/lib/Kconfig.debug
103395+++ b/lib/Kconfig.debug
103396@@ -923,7 +923,7 @@ config DEBUG_MUTEXES
103397
103398 config DEBUG_WW_MUTEX_SLOWPATH
103399 bool "Wait/wound mutex debugging: Slowpath testing"
103400- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
103401+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
103402 select DEBUG_LOCK_ALLOC
103403 select DEBUG_SPINLOCK
103404 select DEBUG_MUTEXES
103405@@ -940,7 +940,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
103406
103407 config DEBUG_LOCK_ALLOC
103408 bool "Lock debugging: detect incorrect freeing of live locks"
103409- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
103410+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
103411 select DEBUG_SPINLOCK
103412 select DEBUG_MUTEXES
103413 select LOCKDEP
103414@@ -954,7 +954,7 @@ config DEBUG_LOCK_ALLOC
103415
103416 config PROVE_LOCKING
103417 bool "Lock debugging: prove locking correctness"
103418- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
103419+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
103420 select LOCKDEP
103421 select DEBUG_SPINLOCK
103422 select DEBUG_MUTEXES
103423@@ -1005,7 +1005,7 @@ config LOCKDEP
103424
103425 config LOCK_STAT
103426 bool "Lock usage statistics"
103427- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
103428+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
103429 select LOCKDEP
103430 select DEBUG_SPINLOCK
103431 select DEBUG_MUTEXES
103432@@ -1467,6 +1467,7 @@ config LATENCYTOP
103433 depends on DEBUG_KERNEL
103434 depends on STACKTRACE_SUPPORT
103435 depends on PROC_FS
103436+ depends on !GRKERNSEC_HIDESYM
103437 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
103438 select KALLSYMS
103439 select KALLSYMS_ALL
103440@@ -1483,7 +1484,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
103441 config DEBUG_STRICT_USER_COPY_CHECKS
103442 bool "Strict user copy size checks"
103443 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
103444- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
103445+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
103446 help
103447 Enabling this option turns a certain set of sanity checks for user
103448 copy operations into compile time failures.
103449@@ -1614,7 +1615,7 @@ endmenu # runtime tests
103450
103451 config PROVIDE_OHCI1394_DMA_INIT
103452 bool "Remote debugging over FireWire early on boot"
103453- depends on PCI && X86
103454+ depends on PCI && X86 && !GRKERNSEC
103455 help
103456 If you want to debug problems which hang or crash the kernel early
103457 on boot and the crashing machine has a FireWire port, you can use
103458diff --git a/lib/Makefile b/lib/Makefile
103459index 58f74d2..08e011f 100644
103460--- a/lib/Makefile
103461+++ b/lib/Makefile
103462@@ -59,7 +59,7 @@ obj-$(CONFIG_BTREE) += btree.o
103463 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
103464 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
103465 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
103466-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
103467+obj-y += list_debug.o
103468 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
103469
103470 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
103471diff --git a/lib/average.c b/lib/average.c
103472index 114d1be..ab0350c 100644
103473--- a/lib/average.c
103474+++ b/lib/average.c
103475@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
103476 {
103477 unsigned long internal = ACCESS_ONCE(avg->internal);
103478
103479- ACCESS_ONCE(avg->internal) = internal ?
103480+ ACCESS_ONCE_RW(avg->internal) = internal ?
103481 (((internal << avg->weight) - internal) +
103482 (val << avg->factor)) >> avg->weight :
103483 (val << avg->factor);
103484diff --git a/lib/bitmap.c b/lib/bitmap.c
103485index d456f4c1..29a0308 100644
103486--- a/lib/bitmap.c
103487+++ b/lib/bitmap.c
103488@@ -264,7 +264,7 @@ int __bitmap_subset(const unsigned long *bitmap1,
103489 }
103490 EXPORT_SYMBOL(__bitmap_subset);
103491
103492-int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
103493+int __intentional_overflow(-1) __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
103494 {
103495 unsigned int k, lim = bits/BITS_PER_LONG;
103496 int w = 0;
103497@@ -391,7 +391,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
103498 {
103499 int c, old_c, totaldigits, ndigits, nchunks, nbits;
103500 u32 chunk;
103501- const char __user __force *ubuf = (const char __user __force *)buf;
103502+ const char __user *ubuf = (const char __force_user *)buf;
103503
103504 bitmap_zero(maskp, nmaskbits);
103505
103506@@ -476,7 +476,7 @@ int bitmap_parse_user(const char __user *ubuf,
103507 {
103508 if (!access_ok(VERIFY_READ, ubuf, ulen))
103509 return -EFAULT;
103510- return __bitmap_parse((const char __force *)ubuf,
103511+ return __bitmap_parse((const char __force_kernel *)ubuf,
103512 ulen, 1, maskp, nmaskbits);
103513
103514 }
103515@@ -535,7 +535,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
103516 {
103517 unsigned a, b;
103518 int c, old_c, totaldigits;
103519- const char __user __force *ubuf = (const char __user __force *)buf;
103520+ const char __user *ubuf = (const char __force_user *)buf;
103521 int exp_digit, in_range;
103522
103523 totaldigits = c = 0;
103524@@ -630,7 +630,7 @@ int bitmap_parselist_user(const char __user *ubuf,
103525 {
103526 if (!access_ok(VERIFY_READ, ubuf, ulen))
103527 return -EFAULT;
103528- return __bitmap_parselist((const char __force *)ubuf,
103529+ return __bitmap_parselist((const char __force_kernel *)ubuf,
103530 ulen, 1, maskp, nmaskbits);
103531 }
103532 EXPORT_SYMBOL(bitmap_parselist_user);
103533diff --git a/lib/bug.c b/lib/bug.c
103534index 0c3bd95..5a615a1 100644
103535--- a/lib/bug.c
103536+++ b/lib/bug.c
103537@@ -145,6 +145,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
103538 return BUG_TRAP_TYPE_NONE;
103539
103540 bug = find_bug(bugaddr);
103541+ if (!bug)
103542+ return BUG_TRAP_TYPE_NONE;
103543
103544 file = NULL;
103545 line = 0;
103546diff --git a/lib/debugobjects.c b/lib/debugobjects.c
103547index 547f7f9..a6d4ba0 100644
103548--- a/lib/debugobjects.c
103549+++ b/lib/debugobjects.c
103550@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
103551 if (limit > 4)
103552 return;
103553
103554- is_on_stack = object_is_on_stack(addr);
103555+ is_on_stack = object_starts_on_stack(addr);
103556 if (is_on_stack == onstack)
103557 return;
103558
103559diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
103560index 6dd0335..1e9c239 100644
103561--- a/lib/decompress_bunzip2.c
103562+++ b/lib/decompress_bunzip2.c
103563@@ -665,7 +665,8 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, long len,
103564
103565 /* Fourth byte (ascii '1'-'9'), indicates block size in units of 100k of
103566 uncompressed data. Allocate intermediate buffer for block. */
103567- bd->dbufSize = 100000*(i-BZh0);
103568+ i -= BZh0;
103569+ bd->dbufSize = 100000 * i;
103570
103571 bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
103572 if (!bd->dbuf)
103573diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
103574index 0be83af..4605e93 100644
103575--- a/lib/decompress_unlzma.c
103576+++ b/lib/decompress_unlzma.c
103577@@ -39,10 +39,10 @@
103578
103579 #define MIN(a, b) (((a) < (b)) ? (a) : (b))
103580
103581-static long long INIT read_int(unsigned char *ptr, int size)
103582+static unsigned long long INIT read_int(unsigned char *ptr, int size)
103583 {
103584 int i;
103585- long long ret = 0;
103586+ unsigned long long ret = 0;
103587
103588 for (i = 0; i < size; i++)
103589 ret = (ret << 8) | ptr[size-i-1];
103590diff --git a/lib/div64.c b/lib/div64.c
103591index 4382ad7..08aa558 100644
103592--- a/lib/div64.c
103593+++ b/lib/div64.c
103594@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
103595 EXPORT_SYMBOL(__div64_32);
103596
103597 #ifndef div_s64_rem
103598-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
103599+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
103600 {
103601 u64 quotient;
103602
103603@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
103604 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
103605 */
103606 #ifndef div64_u64
103607-u64 div64_u64(u64 dividend, u64 divisor)
103608+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
103609 {
103610 u32 high = divisor >> 32;
103611 u64 quot;
103612diff --git a/lib/dma-debug.c b/lib/dma-debug.c
103613index 9722bd2..0d826f4 100644
103614--- a/lib/dma-debug.c
103615+++ b/lib/dma-debug.c
103616@@ -979,7 +979,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
103617
103618 void dma_debug_add_bus(struct bus_type *bus)
103619 {
103620- struct notifier_block *nb;
103621+ notifier_block_no_const *nb;
103622
103623 if (dma_debug_disabled())
103624 return;
103625@@ -1161,7 +1161,7 @@ static void check_unmap(struct dma_debug_entry *ref)
103626
103627 static void check_for_stack(struct device *dev, void *addr)
103628 {
103629- if (object_is_on_stack(addr))
103630+ if (object_starts_on_stack(addr))
103631 err_printk(dev, NULL, "DMA-API: device driver maps memory from "
103632 "stack [addr=%p]\n", addr);
103633 }
103634diff --git a/lib/inflate.c b/lib/inflate.c
103635index 013a761..c28f3fc 100644
103636--- a/lib/inflate.c
103637+++ b/lib/inflate.c
103638@@ -269,7 +269,7 @@ static void free(void *where)
103639 malloc_ptr = free_mem_ptr;
103640 }
103641 #else
103642-#define malloc(a) kmalloc(a, GFP_KERNEL)
103643+#define malloc(a) kmalloc((a), GFP_KERNEL)
103644 #define free(a) kfree(a)
103645 #endif
103646
103647diff --git a/lib/ioremap.c b/lib/ioremap.c
103648index 0c9216c..863bd89 100644
103649--- a/lib/ioremap.c
103650+++ b/lib/ioremap.c
103651@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
103652 unsigned long next;
103653
103654 phys_addr -= addr;
103655- pmd = pmd_alloc(&init_mm, pud, addr);
103656+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
103657 if (!pmd)
103658 return -ENOMEM;
103659 do {
103660@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
103661 unsigned long next;
103662
103663 phys_addr -= addr;
103664- pud = pud_alloc(&init_mm, pgd, addr);
103665+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
103666 if (!pud)
103667 return -ENOMEM;
103668 do {
103669diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
103670index bd2bea9..6b3c95e 100644
103671--- a/lib/is_single_threaded.c
103672+++ b/lib/is_single_threaded.c
103673@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
103674 struct task_struct *p, *t;
103675 bool ret;
103676
103677+ if (!mm)
103678+ return true;
103679+
103680 if (atomic_read(&task->signal->live) != 1)
103681 return false;
103682
103683diff --git a/lib/kobject.c b/lib/kobject.c
103684index 03d4ab3..46f6374 100644
103685--- a/lib/kobject.c
103686+++ b/lib/kobject.c
103687@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
103688
103689
103690 static DEFINE_SPINLOCK(kobj_ns_type_lock);
103691-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
103692+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
103693
103694-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
103695+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
103696 {
103697 enum kobj_ns_type type = ops->type;
103698 int error;
103699diff --git a/lib/list_debug.c b/lib/list_debug.c
103700index c24c2f7..f0296f4 100644
103701--- a/lib/list_debug.c
103702+++ b/lib/list_debug.c
103703@@ -11,7 +11,9 @@
103704 #include <linux/bug.h>
103705 #include <linux/kernel.h>
103706 #include <linux/rculist.h>
103707+#include <linux/mm.h>
103708
103709+#ifdef CONFIG_DEBUG_LIST
103710 /*
103711 * Insert a new entry between two known consecutive entries.
103712 *
103713@@ -19,21 +21,40 @@
103714 * the prev/next entries already!
103715 */
103716
103717+static bool __list_add_debug(struct list_head *new,
103718+ struct list_head *prev,
103719+ struct list_head *next)
103720+{
103721+ if (unlikely(next->prev != prev)) {
103722+ printk(KERN_ERR "list_add corruption. next->prev should be "
103723+ "prev (%p), but was %p. (next=%p).\n",
103724+ prev, next->prev, next);
103725+ BUG();
103726+ return false;
103727+ }
103728+ if (unlikely(prev->next != next)) {
103729+ printk(KERN_ERR "list_add corruption. prev->next should be "
103730+ "next (%p), but was %p. (prev=%p).\n",
103731+ next, prev->next, prev);
103732+ BUG();
103733+ return false;
103734+ }
103735+ if (unlikely(new == prev || new == next)) {
103736+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
103737+ new, prev, next);
103738+ BUG();
103739+ return false;
103740+ }
103741+ return true;
103742+}
103743+
103744 void __list_add(struct list_head *new,
103745- struct list_head *prev,
103746- struct list_head *next)
103747+ struct list_head *prev,
103748+ struct list_head *next)
103749 {
103750- WARN(next->prev != prev,
103751- "list_add corruption. next->prev should be "
103752- "prev (%p), but was %p. (next=%p).\n",
103753- prev, next->prev, next);
103754- WARN(prev->next != next,
103755- "list_add corruption. prev->next should be "
103756- "next (%p), but was %p. (prev=%p).\n",
103757- next, prev->next, prev);
103758- WARN(new == prev || new == next,
103759- "list_add double add: new=%p, prev=%p, next=%p.\n",
103760- new, prev, next);
103761+ if (!__list_add_debug(new, prev, next))
103762+ return;
103763+
103764 next->prev = new;
103765 new->next = next;
103766 new->prev = prev;
103767@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
103768 }
103769 EXPORT_SYMBOL(__list_add);
103770
103771-void __list_del_entry(struct list_head *entry)
103772+static bool __list_del_entry_debug(struct list_head *entry)
103773 {
103774 struct list_head *prev, *next;
103775
103776 prev = entry->prev;
103777 next = entry->next;
103778
103779- if (WARN(next == LIST_POISON1,
103780- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
103781- entry, LIST_POISON1) ||
103782- WARN(prev == LIST_POISON2,
103783- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
103784- entry, LIST_POISON2) ||
103785- WARN(prev->next != entry,
103786- "list_del corruption. prev->next should be %p, "
103787- "but was %p\n", entry, prev->next) ||
103788- WARN(next->prev != entry,
103789- "list_del corruption. next->prev should be %p, "
103790- "but was %p\n", entry, next->prev))
103791+ if (unlikely(next == LIST_POISON1)) {
103792+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
103793+ entry, LIST_POISON1);
103794+ BUG();
103795+ return false;
103796+ }
103797+ if (unlikely(prev == LIST_POISON2)) {
103798+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
103799+ entry, LIST_POISON2);
103800+ BUG();
103801+ return false;
103802+ }
103803+ if (unlikely(entry->prev->next != entry)) {
103804+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
103805+ "but was %p\n", entry, prev->next);
103806+ BUG();
103807+ return false;
103808+ }
103809+ if (unlikely(entry->next->prev != entry)) {
103810+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
103811+ "but was %p\n", entry, next->prev);
103812+ BUG();
103813+ return false;
103814+ }
103815+ return true;
103816+}
103817+
103818+void __list_del_entry(struct list_head *entry)
103819+{
103820+ if (!__list_del_entry_debug(entry))
103821 return;
103822
103823- __list_del(prev, next);
103824+ __list_del(entry->prev, entry->next);
103825 }
103826 EXPORT_SYMBOL(__list_del_entry);
103827
103828@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
103829 void __list_add_rcu(struct list_head *new,
103830 struct list_head *prev, struct list_head *next)
103831 {
103832- WARN(next->prev != prev,
103833- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
103834- prev, next->prev, next);
103835- WARN(prev->next != next,
103836- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
103837- next, prev->next, prev);
103838+ if (!__list_add_debug(new, prev, next))
103839+ return;
103840+
103841 new->next = next;
103842 new->prev = prev;
103843 rcu_assign_pointer(list_next_rcu(prev), new);
103844 next->prev = new;
103845 }
103846 EXPORT_SYMBOL(__list_add_rcu);
103847+#endif
103848+
103849+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
103850+{
103851+#ifdef CONFIG_DEBUG_LIST
103852+ if (!__list_add_debug(new, prev, next))
103853+ return;
103854+#endif
103855+
103856+ pax_open_kernel();
103857+ next->prev = new;
103858+ new->next = next;
103859+ new->prev = prev;
103860+ prev->next = new;
103861+ pax_close_kernel();
103862+}
103863+EXPORT_SYMBOL(__pax_list_add);
103864+
103865+void pax_list_del(struct list_head *entry)
103866+{
103867+#ifdef CONFIG_DEBUG_LIST
103868+ if (!__list_del_entry_debug(entry))
103869+ return;
103870+#endif
103871+
103872+ pax_open_kernel();
103873+ __list_del(entry->prev, entry->next);
103874+ entry->next = LIST_POISON1;
103875+ entry->prev = LIST_POISON2;
103876+ pax_close_kernel();
103877+}
103878+EXPORT_SYMBOL(pax_list_del);
103879+
103880+void pax_list_del_init(struct list_head *entry)
103881+{
103882+ pax_open_kernel();
103883+ __list_del(entry->prev, entry->next);
103884+ INIT_LIST_HEAD(entry);
103885+ pax_close_kernel();
103886+}
103887+EXPORT_SYMBOL(pax_list_del_init);
103888+
103889+void __pax_list_add_rcu(struct list_head *new,
103890+ struct list_head *prev, struct list_head *next)
103891+{
103892+#ifdef CONFIG_DEBUG_LIST
103893+ if (!__list_add_debug(new, prev, next))
103894+ return;
103895+#endif
103896+
103897+ pax_open_kernel();
103898+ new->next = next;
103899+ new->prev = prev;
103900+ rcu_assign_pointer(list_next_rcu(prev), new);
103901+ next->prev = new;
103902+ pax_close_kernel();
103903+}
103904+EXPORT_SYMBOL(__pax_list_add_rcu);
103905+
103906+void pax_list_del_rcu(struct list_head *entry)
103907+{
103908+#ifdef CONFIG_DEBUG_LIST
103909+ if (!__list_del_entry_debug(entry))
103910+ return;
103911+#endif
103912+
103913+ pax_open_kernel();
103914+ __list_del(entry->prev, entry->next);
103915+ entry->next = LIST_POISON1;
103916+ entry->prev = LIST_POISON2;
103917+ pax_close_kernel();
103918+}
103919+EXPORT_SYMBOL(pax_list_del_rcu);
103920diff --git a/lib/lockref.c b/lib/lockref.c
103921index ecb9a66..a044fc5 100644
103922--- a/lib/lockref.c
103923+++ b/lib/lockref.c
103924@@ -48,13 +48,13 @@
103925 void lockref_get(struct lockref *lockref)
103926 {
103927 CMPXCHG_LOOP(
103928- new.count++;
103929+ __lockref_inc(&new);
103930 ,
103931 return;
103932 );
103933
103934 spin_lock(&lockref->lock);
103935- lockref->count++;
103936+ __lockref_inc(lockref);
103937 spin_unlock(&lockref->lock);
103938 }
103939 EXPORT_SYMBOL(lockref_get);
103940@@ -69,8 +69,8 @@ int lockref_get_not_zero(struct lockref *lockref)
103941 int retval;
103942
103943 CMPXCHG_LOOP(
103944- new.count++;
103945- if (old.count <= 0)
103946+ __lockref_inc(&new);
103947+ if (__lockref_read(&old) <= 0)
103948 return 0;
103949 ,
103950 return 1;
103951@@ -78,8 +78,8 @@ int lockref_get_not_zero(struct lockref *lockref)
103952
103953 spin_lock(&lockref->lock);
103954 retval = 0;
103955- if (lockref->count > 0) {
103956- lockref->count++;
103957+ if (__lockref_read(lockref) > 0) {
103958+ __lockref_inc(lockref);
103959 retval = 1;
103960 }
103961 spin_unlock(&lockref->lock);
103962@@ -96,17 +96,17 @@ EXPORT_SYMBOL(lockref_get_not_zero);
103963 int lockref_get_or_lock(struct lockref *lockref)
103964 {
103965 CMPXCHG_LOOP(
103966- new.count++;
103967- if (old.count <= 0)
103968+ __lockref_inc(&new);
103969+ if (__lockref_read(&old) <= 0)
103970 break;
103971 ,
103972 return 1;
103973 );
103974
103975 spin_lock(&lockref->lock);
103976- if (lockref->count <= 0)
103977+ if (__lockref_read(lockref) <= 0)
103978 return 0;
103979- lockref->count++;
103980+ __lockref_inc(lockref);
103981 spin_unlock(&lockref->lock);
103982 return 1;
103983 }
103984@@ -122,11 +122,11 @@ EXPORT_SYMBOL(lockref_get_or_lock);
103985 int lockref_put_return(struct lockref *lockref)
103986 {
103987 CMPXCHG_LOOP(
103988- new.count--;
103989- if (old.count <= 0)
103990+ __lockref_dec(&new);
103991+ if (__lockref_read(&old) <= 0)
103992 return -1;
103993 ,
103994- return new.count;
103995+ return __lockref_read(&new);
103996 );
103997 return -1;
103998 }
103999@@ -140,17 +140,17 @@ EXPORT_SYMBOL(lockref_put_return);
104000 int lockref_put_or_lock(struct lockref *lockref)
104001 {
104002 CMPXCHG_LOOP(
104003- new.count--;
104004- if (old.count <= 1)
104005+ __lockref_dec(&new);
104006+ if (__lockref_read(&old) <= 1)
104007 break;
104008 ,
104009 return 1;
104010 );
104011
104012 spin_lock(&lockref->lock);
104013- if (lockref->count <= 1)
104014+ if (__lockref_read(lockref) <= 1)
104015 return 0;
104016- lockref->count--;
104017+ __lockref_dec(lockref);
104018 spin_unlock(&lockref->lock);
104019 return 1;
104020 }
104021@@ -163,7 +163,7 @@ EXPORT_SYMBOL(lockref_put_or_lock);
104022 void lockref_mark_dead(struct lockref *lockref)
104023 {
104024 assert_spin_locked(&lockref->lock);
104025- lockref->count = -128;
104026+ __lockref_set(lockref, -128);
104027 }
104028 EXPORT_SYMBOL(lockref_mark_dead);
104029
104030@@ -177,8 +177,8 @@ int lockref_get_not_dead(struct lockref *lockref)
104031 int retval;
104032
104033 CMPXCHG_LOOP(
104034- new.count++;
104035- if (old.count < 0)
104036+ __lockref_inc(&new);
104037+ if (__lockref_read(&old) < 0)
104038 return 0;
104039 ,
104040 return 1;
104041@@ -186,8 +186,8 @@ int lockref_get_not_dead(struct lockref *lockref)
104042
104043 spin_lock(&lockref->lock);
104044 retval = 0;
104045- if (lockref->count >= 0) {
104046- lockref->count++;
104047+ if (__lockref_read(lockref) >= 0) {
104048+ __lockref_inc(lockref);
104049 retval = 1;
104050 }
104051 spin_unlock(&lockref->lock);
104052diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
104053index aac5114..a89d041 100644
104054--- a/lib/mpi/longlong.h
104055+++ b/lib/mpi/longlong.h
104056@@ -639,7 +639,7 @@ do { \
104057 ************** MIPS *****************
104058 ***************************************/
104059 #if defined(__mips__) && W_TYPE_SIZE == 32
104060-#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
104061+#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
104062 #define umul_ppmm(w1, w0, u, v) \
104063 do { \
104064 UDItype __ll = (UDItype)(u) * (v); \
104065@@ -671,7 +671,7 @@ do { \
104066 ************** MIPS/64 **************
104067 ***************************************/
104068 #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
104069-#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
104070+#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
104071 #define umul_ppmm(w1, w0, u, v) \
104072 do { \
104073 typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \
104074diff --git a/lib/nlattr.c b/lib/nlattr.c
104075index f5907d2..36072be 100644
104076--- a/lib/nlattr.c
104077+++ b/lib/nlattr.c
104078@@ -278,6 +278,8 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
104079 {
104080 int minlen = min_t(int, count, nla_len(src));
104081
104082+ BUG_ON(minlen < 0);
104083+
104084 memcpy(dest, nla_data(src), minlen);
104085 if (count > minlen)
104086 memset(dest + minlen, 0, count - minlen);
104087diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
104088index 6111bcb..02e816b 100644
104089--- a/lib/percpu-refcount.c
104090+++ b/lib/percpu-refcount.c
104091@@ -31,7 +31,7 @@
104092 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
104093 */
104094
104095-#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
104096+#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 2))
104097
104098 static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
104099
104100diff --git a/lib/radix-tree.c b/lib/radix-tree.c
104101index 3d2aa27..a472f20 100644
104102--- a/lib/radix-tree.c
104103+++ b/lib/radix-tree.c
104104@@ -67,7 +67,7 @@ struct radix_tree_preload {
104105 int nr;
104106 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
104107 };
104108-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
104109+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
104110
104111 static inline void *ptr_to_indirect(void *ptr)
104112 {
104113diff --git a/lib/random32.c b/lib/random32.c
104114index 0bee183..526f12f 100644
104115--- a/lib/random32.c
104116+++ b/lib/random32.c
104117@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
104118 }
104119 #endif
104120
104121-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
104122+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
104123
104124 /**
104125 * prandom_u32_state - seeded pseudo-random number generator.
104126diff --git a/lib/rbtree.c b/lib/rbtree.c
104127index c16c81a..4dcbda1 100644
104128--- a/lib/rbtree.c
104129+++ b/lib/rbtree.c
104130@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
104131 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
104132
104133 static const struct rb_augment_callbacks dummy_callbacks = {
104134- dummy_propagate, dummy_copy, dummy_rotate
104135+ .propagate = dummy_propagate,
104136+ .copy = dummy_copy,
104137+ .rotate = dummy_rotate
104138 };
104139
104140 void rb_insert_color(struct rb_node *node, struct rb_root *root)
104141diff --git a/lib/show_mem.c b/lib/show_mem.c
104142index adc98e18..0ce83c2 100644
104143--- a/lib/show_mem.c
104144+++ b/lib/show_mem.c
104145@@ -49,6 +49,6 @@ void show_mem(unsigned int filter)
104146 quicklist_total_size());
104147 #endif
104148 #ifdef CONFIG_MEMORY_FAILURE
104149- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
104150+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
104151 #endif
104152 }
104153diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
104154index e0af6ff..fcc9f15 100644
104155--- a/lib/strncpy_from_user.c
104156+++ b/lib/strncpy_from_user.c
104157@@ -22,7 +22,7 @@
104158 */
104159 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
104160 {
104161- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
104162+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
104163 long res = 0;
104164
104165 /*
104166diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
104167index 1164961..02dccaa 100644
104168--- a/lib/strnlen_user.c
104169+++ b/lib/strnlen_user.c
104170@@ -26,7 +26,7 @@
104171 */
104172 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
104173 {
104174- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
104175+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
104176 long align, res = 0;
104177 unsigned long c;
104178
104179diff --git a/lib/swiotlb.c b/lib/swiotlb.c
104180index 4abda07..b9d3765 100644
104181--- a/lib/swiotlb.c
104182+++ b/lib/swiotlb.c
104183@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
104184
104185 void
104186 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
104187- dma_addr_t dev_addr)
104188+ dma_addr_t dev_addr, struct dma_attrs *attrs)
104189 {
104190 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
104191
104192diff --git a/lib/test-hexdump.c b/lib/test-hexdump.c
104193index daf29a39..56f44ac 100644
104194--- a/lib/test-hexdump.c
104195+++ b/lib/test-hexdump.c
104196@@ -18,26 +18,26 @@ static const unsigned char data_b[] = {
104197
104198 static const unsigned char data_a[] = ".2.{....p..$}.4...1.....L...C...";
104199
104200-static const char *test_data_1_le[] __initconst = {
104201+static const char * const test_data_1_le[] __initconst = {
104202 "be", "32", "db", "7b", "0a", "18", "93", "b2",
104203 "70", "ba", "c4", "24", "7d", "83", "34", "9b",
104204 "a6", "9c", "31", "ad", "9c", "0f", "ac", "e9",
104205 "4c", "d1", "19", "99", "43", "b1", "af", "0c",
104206 };
104207
104208-static const char *test_data_2_le[] __initconst = {
104209+static const char * const test_data_2_le[] __initconst = {
104210 "32be", "7bdb", "180a", "b293",
104211 "ba70", "24c4", "837d", "9b34",
104212 "9ca6", "ad31", "0f9c", "e9ac",
104213 "d14c", "9919", "b143", "0caf",
104214 };
104215
104216-static const char *test_data_4_le[] __initconst = {
104217+static const char * const test_data_4_le[] __initconst = {
104218 "7bdb32be", "b293180a", "24c4ba70", "9b34837d",
104219 "ad319ca6", "e9ac0f9c", "9919d14c", "0cafb143",
104220 };
104221
104222-static const char *test_data_8_le[] __initconst = {
104223+static const char * const test_data_8_le[] __initconst = {
104224 "b293180a7bdb32be", "9b34837d24c4ba70",
104225 "e9ac0f9cad319ca6", "0cafb1439919d14c",
104226 };
104227diff --git a/lib/usercopy.c b/lib/usercopy.c
104228index 4f5b1dd..7cab418 100644
104229--- a/lib/usercopy.c
104230+++ b/lib/usercopy.c
104231@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
104232 WARN(1, "Buffer overflow detected!\n");
104233 }
104234 EXPORT_SYMBOL(copy_from_user_overflow);
104235+
104236+void copy_to_user_overflow(void)
104237+{
104238+ WARN(1, "Buffer overflow detected!\n");
104239+}
104240+EXPORT_SYMBOL(copy_to_user_overflow);
104241diff --git a/lib/vsprintf.c b/lib/vsprintf.c
104242index b235c96..343ffc1 100644
104243--- a/lib/vsprintf.c
104244+++ b/lib/vsprintf.c
104245@@ -16,6 +16,9 @@
104246 * - scnprintf and vscnprintf
104247 */
104248
104249+#ifdef CONFIG_GRKERNSEC_HIDESYM
104250+#define __INCLUDED_BY_HIDESYM 1
104251+#endif
104252 #include <stdarg.h>
104253 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
104254 #include <linux/types.h>
104255@@ -626,7 +629,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
104256 #ifdef CONFIG_KALLSYMS
104257 if (*fmt == 'B')
104258 sprint_backtrace(sym, value);
104259- else if (*fmt != 'f' && *fmt != 's')
104260+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
104261 sprint_symbol(sym, value);
104262 else
104263 sprint_symbol_no_offset(sym, value);
104264@@ -1322,7 +1325,11 @@ char *address_val(char *buf, char *end, const void *addr,
104265 return number(buf, end, num, spec);
104266 }
104267
104268+#ifdef CONFIG_GRKERNSEC_HIDESYM
104269+int kptr_restrict __read_mostly = 2;
104270+#else
104271 int kptr_restrict __read_mostly;
104272+#endif
104273
104274 /*
104275 * Show a '%p' thing. A kernel extension is that the '%p' is followed
104276@@ -1333,8 +1340,10 @@ int kptr_restrict __read_mostly;
104277 *
104278 * - 'F' For symbolic function descriptor pointers with offset
104279 * - 'f' For simple symbolic function names without offset
104280+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
104281 * - 'S' For symbolic direct pointers with offset
104282 * - 's' For symbolic direct pointers without offset
104283+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
104284 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
104285 * - 'B' For backtraced symbolic direct pointers with offset
104286 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
104287@@ -1417,12 +1426,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
104288
104289 if (!ptr && *fmt != 'K') {
104290 /*
104291- * Print (null) with the same width as a pointer so it makes
104292+ * Print (nil) with the same width as a pointer so it makes
104293 * tabular output look nice.
104294 */
104295 if (spec.field_width == -1)
104296 spec.field_width = default_width;
104297- return string(buf, end, "(null)", spec);
104298+ return string(buf, end, "(nil)", spec);
104299 }
104300
104301 switch (*fmt) {
104302@@ -1432,6 +1441,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
104303 /* Fallthrough */
104304 case 'S':
104305 case 's':
104306+#ifdef CONFIG_GRKERNSEC_HIDESYM
104307+ break;
104308+#else
104309+ return symbol_string(buf, end, ptr, spec, fmt);
104310+#endif
104311+ case 'X':
104312+ ptr = dereference_function_descriptor(ptr);
104313+ case 'A':
104314 case 'B':
104315 return symbol_string(buf, end, ptr, spec, fmt);
104316 case 'R':
104317@@ -1496,6 +1513,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
104318 va_end(va);
104319 return buf;
104320 }
104321+ case 'P':
104322+ break;
104323 case 'K':
104324 /*
104325 * %pK cannot be used in IRQ context because its test
104326@@ -1553,6 +1572,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
104327 ((const struct file *)ptr)->f_path.dentry,
104328 spec, fmt);
104329 }
104330+
104331+#ifdef CONFIG_GRKERNSEC_HIDESYM
104332+ /* 'P' = approved pointers to copy to userland,
104333+ as in the /proc/kallsyms case, as we make it display nothing
104334+ for non-root users, and the real contents for root users
104335+ 'X' = approved simple symbols
104336+ Also ignore 'K' pointers, since we force their NULLing for non-root users
104337+ above
104338+ */
104339+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
104340+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
104341+ dump_stack();
104342+ ptr = NULL;
104343+ }
104344+#endif
104345+
104346 spec.flags |= SMALL;
104347 if (spec.field_width == -1) {
104348 spec.field_width = default_width;
104349@@ -2254,11 +2289,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
104350 typeof(type) value; \
104351 if (sizeof(type) == 8) { \
104352 args = PTR_ALIGN(args, sizeof(u32)); \
104353- *(u32 *)&value = *(u32 *)args; \
104354- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
104355+ *(u32 *)&value = *(const u32 *)args; \
104356+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
104357 } else { \
104358 args = PTR_ALIGN(args, sizeof(type)); \
104359- value = *(typeof(type) *)args; \
104360+ value = *(const typeof(type) *)args; \
104361 } \
104362 args += sizeof(type); \
104363 value; \
104364@@ -2321,7 +2356,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
104365 case FORMAT_TYPE_STR: {
104366 const char *str_arg = args;
104367 args += strlen(str_arg) + 1;
104368- str = string(str, end, (char *)str_arg, spec);
104369+ str = string(str, end, str_arg, spec);
104370 break;
104371 }
104372
104373diff --git a/localversion-grsec b/localversion-grsec
104374new file mode 100644
104375index 0000000..7cd6065
104376--- /dev/null
104377+++ b/localversion-grsec
104378@@ -0,0 +1 @@
104379+-grsec
104380diff --git a/mm/Kconfig b/mm/Kconfig
104381index a03131b..1b1bafb 100644
104382--- a/mm/Kconfig
104383+++ b/mm/Kconfig
104384@@ -342,10 +342,11 @@ config KSM
104385 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
104386
104387 config DEFAULT_MMAP_MIN_ADDR
104388- int "Low address space to protect from user allocation"
104389+ int "Low address space to protect from user allocation"
104390 depends on MMU
104391- default 4096
104392- help
104393+ default 32768 if ALPHA || ARM || PARISC || SPARC32
104394+ default 65536
104395+ help
104396 This is the portion of low virtual memory which should be protected
104397 from userspace allocation. Keeping a user from writing to low pages
104398 can help reduce the impact of kernel NULL pointer bugs.
104399@@ -376,7 +377,7 @@ config MEMORY_FAILURE
104400
104401 config HWPOISON_INJECT
104402 tristate "HWPoison pages injector"
104403- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
104404+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
104405 select PROC_PAGE_MONITOR
104406
104407 config NOMMU_INITIAL_TRIM_EXCESS
104408diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
104409index 957d3da..1d34e20 100644
104410--- a/mm/Kconfig.debug
104411+++ b/mm/Kconfig.debug
104412@@ -10,6 +10,7 @@ config PAGE_EXTENSION
104413 config DEBUG_PAGEALLOC
104414 bool "Debug page memory allocations"
104415 depends on DEBUG_KERNEL
104416+ depends on !PAX_MEMORY_SANITIZE
104417 depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
104418 depends on !KMEMCHECK
104419 select PAGE_EXTENSION
104420diff --git a/mm/backing-dev.c b/mm/backing-dev.c
104421index 000e7b3..aad2605 100644
104422--- a/mm/backing-dev.c
104423+++ b/mm/backing-dev.c
104424@@ -12,7 +12,7 @@
104425 #include <linux/device.h>
104426 #include <trace/events/writeback.h>
104427
104428-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
104429+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
104430
104431 struct backing_dev_info noop_backing_dev_info = {
104432 .name = "noop",
104433@@ -458,7 +458,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
104434 return err;
104435
104436 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
104437- atomic_long_inc_return(&bdi_seq));
104438+ atomic_long_inc_return_unchecked(&bdi_seq));
104439 if (err) {
104440 bdi_destroy(bdi);
104441 return err;
104442diff --git a/mm/filemap.c b/mm/filemap.c
104443index ad72420..0a20ef2 100644
104444--- a/mm/filemap.c
104445+++ b/mm/filemap.c
104446@@ -2097,7 +2097,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
104447 struct address_space *mapping = file->f_mapping;
104448
104449 if (!mapping->a_ops->readpage)
104450- return -ENOEXEC;
104451+ return -ENODEV;
104452 file_accessed(file);
104453 vma->vm_ops = &generic_file_vm_ops;
104454 return 0;
104455@@ -2275,6 +2275,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
104456 *pos = i_size_read(inode);
104457
104458 if (limit != RLIM_INFINITY) {
104459+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
104460 if (*pos >= limit) {
104461 send_sig(SIGXFSZ, current, 0);
104462 return -EFBIG;
104463diff --git a/mm/gup.c b/mm/gup.c
104464index a6e24e2..72dd2cf 100644
104465--- a/mm/gup.c
104466+++ b/mm/gup.c
104467@@ -265,11 +265,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
104468 unsigned int fault_flags = 0;
104469 int ret;
104470
104471- /* For mlock, just skip the stack guard page. */
104472- if ((*flags & FOLL_MLOCK) &&
104473- (stack_guard_page_start(vma, address) ||
104474- stack_guard_page_end(vma, address + PAGE_SIZE)))
104475- return -ENOENT;
104476 if (*flags & FOLL_WRITE)
104477 fault_flags |= FAULT_FLAG_WRITE;
104478 if (nonblocking)
104479@@ -435,14 +430,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
104480 if (!(gup_flags & FOLL_FORCE))
104481 gup_flags |= FOLL_NUMA;
104482
104483- do {
104484+ while (nr_pages) {
104485 struct page *page;
104486 unsigned int foll_flags = gup_flags;
104487 unsigned int page_increm;
104488
104489 /* first iteration or cross vma bound */
104490 if (!vma || start >= vma->vm_end) {
104491- vma = find_extend_vma(mm, start);
104492+ vma = find_vma(mm, start);
104493 if (!vma && in_gate_area(mm, start)) {
104494 int ret;
104495 ret = get_gate_page(mm, start & PAGE_MASK,
104496@@ -454,7 +449,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
104497 goto next_page;
104498 }
104499
104500- if (!vma || check_vma_flags(vma, gup_flags))
104501+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
104502 return i ? : -EFAULT;
104503 if (is_vm_hugetlb_page(vma)) {
104504 i = follow_hugetlb_page(mm, vma, pages, vmas,
104505@@ -509,7 +504,7 @@ next_page:
104506 i += page_increm;
104507 start += page_increm * PAGE_SIZE;
104508 nr_pages -= page_increm;
104509- } while (nr_pages);
104510+ }
104511 return i;
104512 }
104513 EXPORT_SYMBOL(__get_user_pages);
104514diff --git a/mm/highmem.c b/mm/highmem.c
104515index 123bcd3..0de52ba 100644
104516--- a/mm/highmem.c
104517+++ b/mm/highmem.c
104518@@ -195,8 +195,9 @@ static void flush_all_zero_pkmaps(void)
104519 * So no dangers, even with speculative execution.
104520 */
104521 page = pte_page(pkmap_page_table[i]);
104522+ pax_open_kernel();
104523 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
104524-
104525+ pax_close_kernel();
104526 set_page_address(page, NULL);
104527 need_flush = 1;
104528 }
104529@@ -259,9 +260,11 @@ start:
104530 }
104531 }
104532 vaddr = PKMAP_ADDR(last_pkmap_nr);
104533+
104534+ pax_open_kernel();
104535 set_pte_at(&init_mm, vaddr,
104536 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
104537-
104538+ pax_close_kernel();
104539 pkmap_count[last_pkmap_nr] = 1;
104540 set_page_address(page, (void *)vaddr);
104541
104542diff --git a/mm/hugetlb.c b/mm/hugetlb.c
104543index caad3c5..4f68807 100644
104544--- a/mm/hugetlb.c
104545+++ b/mm/hugetlb.c
104546@@ -2260,6 +2260,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
104547 struct ctl_table *table, int write,
104548 void __user *buffer, size_t *length, loff_t *ppos)
104549 {
104550+ ctl_table_no_const t;
104551 struct hstate *h = &default_hstate;
104552 unsigned long tmp = h->max_huge_pages;
104553 int ret;
104554@@ -2267,9 +2268,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
104555 if (!hugepages_supported())
104556 return -ENOTSUPP;
104557
104558- table->data = &tmp;
104559- table->maxlen = sizeof(unsigned long);
104560- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
104561+ t = *table;
104562+ t.data = &tmp;
104563+ t.maxlen = sizeof(unsigned long);
104564+ ret = proc_doulongvec_minmax(&t, write, buffer, length, ppos);
104565 if (ret)
104566 goto out;
104567
104568@@ -2304,6 +2306,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
104569 struct hstate *h = &default_hstate;
104570 unsigned long tmp;
104571 int ret;
104572+ ctl_table_no_const hugetlb_table;
104573
104574 if (!hugepages_supported())
104575 return -ENOTSUPP;
104576@@ -2313,9 +2316,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
104577 if (write && hstate_is_gigantic(h))
104578 return -EINVAL;
104579
104580- table->data = &tmp;
104581- table->maxlen = sizeof(unsigned long);
104582- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
104583+ hugetlb_table = *table;
104584+ hugetlb_table.data = &tmp;
104585+ hugetlb_table.maxlen = sizeof(unsigned long);
104586+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
104587 if (ret)
104588 goto out;
104589
104590@@ -2800,6 +2804,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
104591 i_mmap_unlock_write(mapping);
104592 }
104593
104594+#ifdef CONFIG_PAX_SEGMEXEC
104595+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
104596+{
104597+ struct mm_struct *mm = vma->vm_mm;
104598+ struct vm_area_struct *vma_m;
104599+ unsigned long address_m;
104600+ pte_t *ptep_m;
104601+
104602+ vma_m = pax_find_mirror_vma(vma);
104603+ if (!vma_m)
104604+ return;
104605+
104606+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
104607+ address_m = address + SEGMEXEC_TASK_SIZE;
104608+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
104609+ get_page(page_m);
104610+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
104611+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
104612+}
104613+#endif
104614+
104615 /*
104616 * Hugetlb_cow() should be called with page lock of the original hugepage held.
104617 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
104618@@ -2912,6 +2937,11 @@ retry_avoidcopy:
104619 make_huge_pte(vma, new_page, 1));
104620 page_remove_rmap(old_page);
104621 hugepage_add_new_anon_rmap(new_page, vma, address);
104622+
104623+#ifdef CONFIG_PAX_SEGMEXEC
104624+ pax_mirror_huge_pte(vma, address, new_page);
104625+#endif
104626+
104627 /* Make the old page be freed below */
104628 new_page = old_page;
104629 }
104630@@ -3072,6 +3102,10 @@ retry:
104631 && (vma->vm_flags & VM_SHARED)));
104632 set_huge_pte_at(mm, address, ptep, new_pte);
104633
104634+#ifdef CONFIG_PAX_SEGMEXEC
104635+ pax_mirror_huge_pte(vma, address, page);
104636+#endif
104637+
104638 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
104639 /* Optimization, do the COW without a second fault */
104640 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
104641@@ -3139,6 +3173,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
104642 struct address_space *mapping;
104643 int need_wait_lock = 0;
104644
104645+#ifdef CONFIG_PAX_SEGMEXEC
104646+ struct vm_area_struct *vma_m;
104647+#endif
104648+
104649 address &= huge_page_mask(h);
104650
104651 ptep = huge_pte_offset(mm, address);
104652@@ -3152,6 +3190,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
104653 VM_FAULT_SET_HINDEX(hstate_index(h));
104654 }
104655
104656+#ifdef CONFIG_PAX_SEGMEXEC
104657+ vma_m = pax_find_mirror_vma(vma);
104658+ if (vma_m) {
104659+ unsigned long address_m;
104660+
104661+ if (vma->vm_start > vma_m->vm_start) {
104662+ address_m = address;
104663+ address -= SEGMEXEC_TASK_SIZE;
104664+ vma = vma_m;
104665+ h = hstate_vma(vma);
104666+ } else
104667+ address_m = address + SEGMEXEC_TASK_SIZE;
104668+
104669+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
104670+ return VM_FAULT_OOM;
104671+ address_m &= HPAGE_MASK;
104672+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
104673+ }
104674+#endif
104675+
104676 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
104677 if (!ptep)
104678 return VM_FAULT_OOM;
104679diff --git a/mm/internal.h b/mm/internal.h
104680index a96da5b..42ebd54 100644
104681--- a/mm/internal.h
104682+++ b/mm/internal.h
104683@@ -156,6 +156,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
104684
104685 extern int __isolate_free_page(struct page *page, unsigned int order);
104686 extern void __free_pages_bootmem(struct page *page, unsigned int order);
104687+extern void free_compound_page(struct page *page);
104688 extern void prep_compound_page(struct page *page, unsigned long order);
104689 #ifdef CONFIG_MEMORY_FAILURE
104690 extern bool is_free_buddy_page(struct page *page);
104691@@ -411,7 +412,7 @@ extern u32 hwpoison_filter_enable;
104692
104693 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
104694 unsigned long, unsigned long,
104695- unsigned long, unsigned long);
104696+ unsigned long, unsigned long) __intentional_overflow(-1);
104697
104698 extern void set_pageblock_order(void);
104699 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
104700diff --git a/mm/kmemleak.c b/mm/kmemleak.c
104701index f0fe4f2..898208c 100644
104702--- a/mm/kmemleak.c
104703+++ b/mm/kmemleak.c
104704@@ -366,7 +366,7 @@ static void print_unreferenced(struct seq_file *seq,
104705
104706 for (i = 0; i < object->trace_len; i++) {
104707 void *ptr = (void *)object->trace[i];
104708- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
104709+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
104710 }
104711 }
104712
104713@@ -1912,7 +1912,7 @@ static int __init kmemleak_late_init(void)
104714 return -ENOMEM;
104715 }
104716
104717- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
104718+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
104719 &kmemleak_fops);
104720 if (!dentry)
104721 pr_warning("Failed to create the debugfs kmemleak file\n");
104722diff --git a/mm/maccess.c b/mm/maccess.c
104723index d53adf9..03a24bf 100644
104724--- a/mm/maccess.c
104725+++ b/mm/maccess.c
104726@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
104727 set_fs(KERNEL_DS);
104728 pagefault_disable();
104729 ret = __copy_from_user_inatomic(dst,
104730- (__force const void __user *)src, size);
104731+ (const void __force_user *)src, size);
104732 pagefault_enable();
104733 set_fs(old_fs);
104734
104735@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
104736
104737 set_fs(KERNEL_DS);
104738 pagefault_disable();
104739- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
104740+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
104741 pagefault_enable();
104742 set_fs(old_fs);
104743
104744diff --git a/mm/madvise.c b/mm/madvise.c
104745index d551475..8fdd7f3 100644
104746--- a/mm/madvise.c
104747+++ b/mm/madvise.c
104748@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
104749 pgoff_t pgoff;
104750 unsigned long new_flags = vma->vm_flags;
104751
104752+#ifdef CONFIG_PAX_SEGMEXEC
104753+ struct vm_area_struct *vma_m;
104754+#endif
104755+
104756 switch (behavior) {
104757 case MADV_NORMAL:
104758 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
104759@@ -126,6 +130,13 @@ success:
104760 /*
104761 * vm_flags is protected by the mmap_sem held in write mode.
104762 */
104763+
104764+#ifdef CONFIG_PAX_SEGMEXEC
104765+ vma_m = pax_find_mirror_vma(vma);
104766+ if (vma_m)
104767+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
104768+#endif
104769+
104770 vma->vm_flags = new_flags;
104771
104772 out:
104773@@ -277,11 +288,27 @@ static long madvise_dontneed(struct vm_area_struct *vma,
104774 struct vm_area_struct **prev,
104775 unsigned long start, unsigned long end)
104776 {
104777+
104778+#ifdef CONFIG_PAX_SEGMEXEC
104779+ struct vm_area_struct *vma_m;
104780+#endif
104781+
104782 *prev = vma;
104783 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
104784 return -EINVAL;
104785
104786 zap_page_range(vma, start, end - start, NULL);
104787+
104788+#ifdef CONFIG_PAX_SEGMEXEC
104789+ vma_m = pax_find_mirror_vma(vma);
104790+ if (vma_m) {
104791+ if (vma_m->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
104792+ return -EINVAL;
104793+
104794+ zap_page_range(vma_m, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
104795+ }
104796+#endif
104797+
104798 return 0;
104799 }
104800
104801@@ -484,6 +511,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
104802 if (end < start)
104803 return error;
104804
104805+#ifdef CONFIG_PAX_SEGMEXEC
104806+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
104807+ if (end > SEGMEXEC_TASK_SIZE)
104808+ return error;
104809+ } else
104810+#endif
104811+
104812+ if (end > TASK_SIZE)
104813+ return error;
104814+
104815 error = 0;
104816 if (end == start)
104817 return error;
104818diff --git a/mm/memory-failure.c b/mm/memory-failure.c
104819index 72a5224..51ba846 100644
104820--- a/mm/memory-failure.c
104821+++ b/mm/memory-failure.c
104822@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
104823
104824 int sysctl_memory_failure_recovery __read_mostly = 1;
104825
104826-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
104827+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
104828
104829 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
104830
104831@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
104832 pfn, t->comm, t->pid);
104833 si.si_signo = SIGBUS;
104834 si.si_errno = 0;
104835- si.si_addr = (void *)addr;
104836+ si.si_addr = (void __user *)addr;
104837 #ifdef __ARCH_SI_TRAPNO
104838 si.si_trapno = trapno;
104839 #endif
104840@@ -779,7 +779,7 @@ static struct page_state {
104841 unsigned long res;
104842 char *msg;
104843 int (*action)(struct page *p, unsigned long pfn);
104844-} error_states[] = {
104845+} __do_const error_states[] = {
104846 { reserved, reserved, "reserved kernel", me_kernel },
104847 /*
104848 * free pages are specially detected outside this table:
104849@@ -1087,7 +1087,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
104850 nr_pages = 1 << compound_order(hpage);
104851 else /* normal page or thp */
104852 nr_pages = 1;
104853- atomic_long_add(nr_pages, &num_poisoned_pages);
104854+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
104855
104856 /*
104857 * We need/can do nothing about count=0 pages.
104858@@ -1116,7 +1116,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
104859 if (PageHWPoison(hpage)) {
104860 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
104861 || (p != hpage && TestSetPageHWPoison(hpage))) {
104862- atomic_long_sub(nr_pages, &num_poisoned_pages);
104863+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
104864 unlock_page(hpage);
104865 return 0;
104866 }
104867@@ -1184,14 +1184,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
104868 */
104869 if (!PageHWPoison(p)) {
104870 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
104871- atomic_long_sub(nr_pages, &num_poisoned_pages);
104872+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
104873 put_page(hpage);
104874 res = 0;
104875 goto out;
104876 }
104877 if (hwpoison_filter(p)) {
104878 if (TestClearPageHWPoison(p))
104879- atomic_long_sub(nr_pages, &num_poisoned_pages);
104880+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
104881 unlock_page(hpage);
104882 put_page(hpage);
104883 return 0;
104884@@ -1421,7 +1421,7 @@ int unpoison_memory(unsigned long pfn)
104885 return 0;
104886 }
104887 if (TestClearPageHWPoison(p))
104888- atomic_long_dec(&num_poisoned_pages);
104889+ atomic_long_dec_unchecked(&num_poisoned_pages);
104890 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
104891 return 0;
104892 }
104893@@ -1435,7 +1435,7 @@ int unpoison_memory(unsigned long pfn)
104894 */
104895 if (TestClearPageHWPoison(page)) {
104896 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
104897- atomic_long_sub(nr_pages, &num_poisoned_pages);
104898+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
104899 freeit = 1;
104900 if (PageHuge(page))
104901 clear_page_hwpoison_huge_page(page);
104902@@ -1560,11 +1560,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
104903 if (PageHuge(page)) {
104904 set_page_hwpoison_huge_page(hpage);
104905 dequeue_hwpoisoned_huge_page(hpage);
104906- atomic_long_add(1 << compound_order(hpage),
104907+ atomic_long_add_unchecked(1 << compound_order(hpage),
104908 &num_poisoned_pages);
104909 } else {
104910 SetPageHWPoison(page);
104911- atomic_long_inc(&num_poisoned_pages);
104912+ atomic_long_inc_unchecked(&num_poisoned_pages);
104913 }
104914 }
104915 return ret;
104916@@ -1603,7 +1603,7 @@ static int __soft_offline_page(struct page *page, int flags)
104917 put_page(page);
104918 pr_info("soft_offline: %#lx: invalidated\n", pfn);
104919 SetPageHWPoison(page);
104920- atomic_long_inc(&num_poisoned_pages);
104921+ atomic_long_inc_unchecked(&num_poisoned_pages);
104922 return 0;
104923 }
104924
104925@@ -1652,7 +1652,7 @@ static int __soft_offline_page(struct page *page, int flags)
104926 if (!is_free_buddy_page(page))
104927 pr_info("soft offline: %#lx: page leaked\n",
104928 pfn);
104929- atomic_long_inc(&num_poisoned_pages);
104930+ atomic_long_inc_unchecked(&num_poisoned_pages);
104931 }
104932 } else {
104933 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
104934@@ -1722,11 +1722,11 @@ int soft_offline_page(struct page *page, int flags)
104935 if (PageHuge(page)) {
104936 set_page_hwpoison_huge_page(hpage);
104937 if (!dequeue_hwpoisoned_huge_page(hpage))
104938- atomic_long_add(1 << compound_order(hpage),
104939+ atomic_long_add_unchecked(1 << compound_order(hpage),
104940 &num_poisoned_pages);
104941 } else {
104942 if (!TestSetPageHWPoison(page))
104943- atomic_long_inc(&num_poisoned_pages);
104944+ atomic_long_inc_unchecked(&num_poisoned_pages);
104945 }
104946 }
104947 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
104948diff --git a/mm/memory.c b/mm/memory.c
104949index 97839f5..4bc5530 100644
104950--- a/mm/memory.c
104951+++ b/mm/memory.c
104952@@ -414,6 +414,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
104953 free_pte_range(tlb, pmd, addr);
104954 } while (pmd++, addr = next, addr != end);
104955
104956+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
104957 start &= PUD_MASK;
104958 if (start < floor)
104959 return;
104960@@ -429,6 +430,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
104961 pud_clear(pud);
104962 pmd_free_tlb(tlb, pmd, start);
104963 mm_dec_nr_pmds(tlb->mm);
104964+#endif
104965 }
104966
104967 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
104968@@ -448,6 +450,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
104969 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
104970 } while (pud++, addr = next, addr != end);
104971
104972+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
104973 start &= PGDIR_MASK;
104974 if (start < floor)
104975 return;
104976@@ -462,6 +465,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
104977 pud = pud_offset(pgd, start);
104978 pgd_clear(pgd);
104979 pud_free_tlb(tlb, pud, start);
104980+#endif
104981+
104982 }
104983
104984 /*
104985@@ -691,10 +696,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
104986 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
104987 */
104988 if (vma->vm_ops)
104989- printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
104990+ printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n",
104991 vma->vm_ops->fault);
104992 if (vma->vm_file)
104993- printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
104994+ printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n",
104995 vma->vm_file->f_op->mmap);
104996 dump_stack();
104997 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
104998@@ -1464,6 +1469,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
104999 page_add_file_rmap(page);
105000 set_pte_at(mm, addr, pte, mk_pte(page, prot));
105001
105002+#ifdef CONFIG_PAX_SEGMEXEC
105003+ pax_mirror_file_pte(vma, addr, page, ptl);
105004+#endif
105005+
105006 retval = 0;
105007 pte_unmap_unlock(pte, ptl);
105008 return retval;
105009@@ -1508,9 +1517,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
105010 if (!page_count(page))
105011 return -EINVAL;
105012 if (!(vma->vm_flags & VM_MIXEDMAP)) {
105013+
105014+#ifdef CONFIG_PAX_SEGMEXEC
105015+ struct vm_area_struct *vma_m;
105016+#endif
105017+
105018 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
105019 BUG_ON(vma->vm_flags & VM_PFNMAP);
105020 vma->vm_flags |= VM_MIXEDMAP;
105021+
105022+#ifdef CONFIG_PAX_SEGMEXEC
105023+ vma_m = pax_find_mirror_vma(vma);
105024+ if (vma_m)
105025+ vma_m->vm_flags |= VM_MIXEDMAP;
105026+#endif
105027+
105028 }
105029 return insert_page(vma, addr, page, vma->vm_page_prot);
105030 }
105031@@ -1593,6 +1614,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
105032 unsigned long pfn)
105033 {
105034 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
105035+ BUG_ON(vma->vm_mirror);
105036
105037 if (addr < vma->vm_start || addr >= vma->vm_end)
105038 return -EFAULT;
105039@@ -1840,7 +1862,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
105040
105041 BUG_ON(pud_huge(*pud));
105042
105043- pmd = pmd_alloc(mm, pud, addr);
105044+ pmd = (mm == &init_mm) ?
105045+ pmd_alloc_kernel(mm, pud, addr) :
105046+ pmd_alloc(mm, pud, addr);
105047 if (!pmd)
105048 return -ENOMEM;
105049 do {
105050@@ -1860,7 +1884,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
105051 unsigned long next;
105052 int err;
105053
105054- pud = pud_alloc(mm, pgd, addr);
105055+ pud = (mm == &init_mm) ?
105056+ pud_alloc_kernel(mm, pgd, addr) :
105057+ pud_alloc(mm, pgd, addr);
105058 if (!pud)
105059 return -ENOMEM;
105060 do {
105061@@ -1982,6 +2008,185 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
105062 return ret;
105063 }
105064
105065+#ifdef CONFIG_PAX_SEGMEXEC
105066+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
105067+{
105068+ struct mm_struct *mm = vma->vm_mm;
105069+ spinlock_t *ptl;
105070+ pte_t *pte, entry;
105071+
105072+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
105073+ entry = *pte;
105074+ if (!pte_present(entry)) {
105075+ if (!pte_none(entry)) {
105076+ free_swap_and_cache(pte_to_swp_entry(entry));
105077+ pte_clear_not_present_full(mm, address, pte, 0);
105078+ }
105079+ } else {
105080+ struct page *page;
105081+
105082+ flush_cache_page(vma, address, pte_pfn(entry));
105083+ entry = ptep_clear_flush(vma, address, pte);
105084+ BUG_ON(pte_dirty(entry));
105085+ page = vm_normal_page(vma, address, entry);
105086+ if (page) {
105087+ update_hiwater_rss(mm);
105088+ if (PageAnon(page))
105089+ dec_mm_counter_fast(mm, MM_ANONPAGES);
105090+ else
105091+ dec_mm_counter_fast(mm, MM_FILEPAGES);
105092+ page_remove_rmap(page);
105093+ page_cache_release(page);
105094+ }
105095+ }
105096+ pte_unmap_unlock(pte, ptl);
105097+}
105098+
105099+/* PaX: if vma is mirrored, synchronize the mirror's PTE
105100+ *
105101+ * the ptl of the lower mapped page is held on entry and is not released on exit
105102+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
105103+ */
105104+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
105105+{
105106+ struct mm_struct *mm = vma->vm_mm;
105107+ unsigned long address_m;
105108+ spinlock_t *ptl_m;
105109+ struct vm_area_struct *vma_m;
105110+ pmd_t *pmd_m;
105111+ pte_t *pte_m, entry_m;
105112+
105113+ BUG_ON(!page_m || !PageAnon(page_m));
105114+
105115+ vma_m = pax_find_mirror_vma(vma);
105116+ if (!vma_m)
105117+ return;
105118+
105119+ BUG_ON(!PageLocked(page_m));
105120+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
105121+ address_m = address + SEGMEXEC_TASK_SIZE;
105122+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
105123+ pte_m = pte_offset_map(pmd_m, address_m);
105124+ ptl_m = pte_lockptr(mm, pmd_m);
105125+ if (ptl != ptl_m) {
105126+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
105127+ if (!pte_none(*pte_m))
105128+ goto out;
105129+ }
105130+
105131+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
105132+ page_cache_get(page_m);
105133+ page_add_anon_rmap(page_m, vma_m, address_m);
105134+ inc_mm_counter_fast(mm, MM_ANONPAGES);
105135+ set_pte_at(mm, address_m, pte_m, entry_m);
105136+ update_mmu_cache(vma_m, address_m, pte_m);
105137+out:
105138+ if (ptl != ptl_m)
105139+ spin_unlock(ptl_m);
105140+ pte_unmap(pte_m);
105141+ unlock_page(page_m);
105142+}
105143+
105144+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
105145+{
105146+ struct mm_struct *mm = vma->vm_mm;
105147+ unsigned long address_m;
105148+ spinlock_t *ptl_m;
105149+ struct vm_area_struct *vma_m;
105150+ pmd_t *pmd_m;
105151+ pte_t *pte_m, entry_m;
105152+
105153+ BUG_ON(!page_m || PageAnon(page_m));
105154+
105155+ vma_m = pax_find_mirror_vma(vma);
105156+ if (!vma_m)
105157+ return;
105158+
105159+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
105160+ address_m = address + SEGMEXEC_TASK_SIZE;
105161+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
105162+ pte_m = pte_offset_map(pmd_m, address_m);
105163+ ptl_m = pte_lockptr(mm, pmd_m);
105164+ if (ptl != ptl_m) {
105165+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
105166+ if (!pte_none(*pte_m))
105167+ goto out;
105168+ }
105169+
105170+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
105171+ page_cache_get(page_m);
105172+ page_add_file_rmap(page_m);
105173+ inc_mm_counter_fast(mm, MM_FILEPAGES);
105174+ set_pte_at(mm, address_m, pte_m, entry_m);
105175+ update_mmu_cache(vma_m, address_m, pte_m);
105176+out:
105177+ if (ptl != ptl_m)
105178+ spin_unlock(ptl_m);
105179+ pte_unmap(pte_m);
105180+}
105181+
105182+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
105183+{
105184+ struct mm_struct *mm = vma->vm_mm;
105185+ unsigned long address_m;
105186+ spinlock_t *ptl_m;
105187+ struct vm_area_struct *vma_m;
105188+ pmd_t *pmd_m;
105189+ pte_t *pte_m, entry_m;
105190+
105191+ vma_m = pax_find_mirror_vma(vma);
105192+ if (!vma_m)
105193+ return;
105194+
105195+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
105196+ address_m = address + SEGMEXEC_TASK_SIZE;
105197+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
105198+ pte_m = pte_offset_map(pmd_m, address_m);
105199+ ptl_m = pte_lockptr(mm, pmd_m);
105200+ if (ptl != ptl_m) {
105201+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
105202+ if (!pte_none(*pte_m))
105203+ goto out;
105204+ }
105205+
105206+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
105207+ set_pte_at(mm, address_m, pte_m, entry_m);
105208+out:
105209+ if (ptl != ptl_m)
105210+ spin_unlock(ptl_m);
105211+ pte_unmap(pte_m);
105212+}
105213+
105214+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
105215+{
105216+ struct page *page_m;
105217+ pte_t entry;
105218+
105219+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
105220+ goto out;
105221+
105222+ entry = *pte;
105223+ page_m = vm_normal_page(vma, address, entry);
105224+ if (!page_m)
105225+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
105226+ else if (PageAnon(page_m)) {
105227+ if (pax_find_mirror_vma(vma)) {
105228+ pte_unmap_unlock(pte, ptl);
105229+ lock_page(page_m);
105230+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
105231+ if (pte_same(entry, *pte))
105232+ pax_mirror_anon_pte(vma, address, page_m, ptl);
105233+ else
105234+ unlock_page(page_m);
105235+ }
105236+ } else
105237+ pax_mirror_file_pte(vma, address, page_m, ptl);
105238+
105239+out:
105240+ pte_unmap_unlock(pte, ptl);
105241+}
105242+#endif
105243+
105244 /*
105245 * This routine handles present pages, when users try to write
105246 * to a shared page. It is done by copying the page to a new address
105247@@ -2172,6 +2377,12 @@ gotten:
105248 */
105249 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
105250 if (likely(pte_same(*page_table, orig_pte))) {
105251+
105252+#ifdef CONFIG_PAX_SEGMEXEC
105253+ if (pax_find_mirror_vma(vma))
105254+ BUG_ON(!trylock_page(new_page));
105255+#endif
105256+
105257 if (old_page) {
105258 if (!PageAnon(old_page)) {
105259 dec_mm_counter_fast(mm, MM_FILEPAGES);
105260@@ -2225,6 +2436,10 @@ gotten:
105261 page_remove_rmap(old_page);
105262 }
105263
105264+#ifdef CONFIG_PAX_SEGMEXEC
105265+ pax_mirror_anon_pte(vma, address, new_page, ptl);
105266+#endif
105267+
105268 /* Free the old page.. */
105269 new_page = old_page;
105270 ret |= VM_FAULT_WRITE;
105271@@ -2483,6 +2698,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
105272 swap_free(entry);
105273 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
105274 try_to_free_swap(page);
105275+
105276+#ifdef CONFIG_PAX_SEGMEXEC
105277+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
105278+#endif
105279+
105280 unlock_page(page);
105281 if (page != swapcache) {
105282 /*
105283@@ -2506,6 +2726,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
105284
105285 /* No need to invalidate - it was non-present before */
105286 update_mmu_cache(vma, address, page_table);
105287+
105288+#ifdef CONFIG_PAX_SEGMEXEC
105289+ pax_mirror_anon_pte(vma, address, page, ptl);
105290+#endif
105291+
105292 unlock:
105293 pte_unmap_unlock(page_table, ptl);
105294 out:
105295@@ -2525,40 +2750,6 @@ out_release:
105296 }
105297
105298 /*
105299- * This is like a special single-page "expand_{down|up}wards()",
105300- * except we must first make sure that 'address{-|+}PAGE_SIZE'
105301- * doesn't hit another vma.
105302- */
105303-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
105304-{
105305- address &= PAGE_MASK;
105306- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
105307- struct vm_area_struct *prev = vma->vm_prev;
105308-
105309- /*
105310- * Is there a mapping abutting this one below?
105311- *
105312- * That's only ok if it's the same stack mapping
105313- * that has gotten split..
105314- */
105315- if (prev && prev->vm_end == address)
105316- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
105317-
105318- return expand_downwards(vma, address - PAGE_SIZE);
105319- }
105320- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
105321- struct vm_area_struct *next = vma->vm_next;
105322-
105323- /* As VM_GROWSDOWN but s/below/above/ */
105324- if (next && next->vm_start == address + PAGE_SIZE)
105325- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
105326-
105327- return expand_upwards(vma, address + PAGE_SIZE);
105328- }
105329- return 0;
105330-}
105331-
105332-/*
105333 * We enter with non-exclusive mmap_sem (to exclude vma changes,
105334 * but allow concurrent faults), and pte mapped but not yet locked.
105335 * We return with mmap_sem still held, but pte unmapped and unlocked.
105336@@ -2568,27 +2759,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
105337 unsigned int flags)
105338 {
105339 struct mem_cgroup *memcg;
105340- struct page *page;
105341+ struct page *page = NULL;
105342 spinlock_t *ptl;
105343 pte_t entry;
105344
105345- pte_unmap(page_table);
105346-
105347- /* Check if we need to add a guard page to the stack */
105348- if (check_stack_guard_page(vma, address) < 0)
105349- return VM_FAULT_SIGSEGV;
105350-
105351- /* Use the zero-page for reads */
105352 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
105353 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
105354 vma->vm_page_prot));
105355- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
105356+ ptl = pte_lockptr(mm, pmd);
105357+ spin_lock(ptl);
105358 if (!pte_none(*page_table))
105359 goto unlock;
105360 goto setpte;
105361 }
105362
105363 /* Allocate our own private page. */
105364+ pte_unmap(page_table);
105365+
105366 if (unlikely(anon_vma_prepare(vma)))
105367 goto oom;
105368 page = alloc_zeroed_user_highpage_movable(vma, address);
105369@@ -2612,6 +2799,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
105370 if (!pte_none(*page_table))
105371 goto release;
105372
105373+#ifdef CONFIG_PAX_SEGMEXEC
105374+ if (pax_find_mirror_vma(vma))
105375+ BUG_ON(!trylock_page(page));
105376+#endif
105377+
105378 inc_mm_counter_fast(mm, MM_ANONPAGES);
105379 page_add_new_anon_rmap(page, vma, address);
105380 mem_cgroup_commit_charge(page, memcg, false);
105381@@ -2621,6 +2813,12 @@ setpte:
105382
105383 /* No need to invalidate - it was non-present before */
105384 update_mmu_cache(vma, address, page_table);
105385+
105386+#ifdef CONFIG_PAX_SEGMEXEC
105387+ if (page)
105388+ pax_mirror_anon_pte(vma, address, page, ptl);
105389+#endif
105390+
105391 unlock:
105392 pte_unmap_unlock(page_table, ptl);
105393 return 0;
105394@@ -2853,6 +3051,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
105395 return ret;
105396 }
105397 do_set_pte(vma, address, fault_page, pte, false, false);
105398+
105399+#ifdef CONFIG_PAX_SEGMEXEC
105400+ pax_mirror_file_pte(vma, address, fault_page, ptl);
105401+#endif
105402+
105403 unlock_page(fault_page);
105404 unlock_out:
105405 pte_unmap_unlock(pte, ptl);
105406@@ -2904,7 +3107,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
105407 }
105408 goto uncharge_out;
105409 }
105410+
105411+#ifdef CONFIG_PAX_SEGMEXEC
105412+ if (pax_find_mirror_vma(vma))
105413+ BUG_ON(!trylock_page(new_page));
105414+#endif
105415+
105416 do_set_pte(vma, address, new_page, pte, true, true);
105417+
105418+#ifdef CONFIG_PAX_SEGMEXEC
105419+ pax_mirror_anon_pte(vma, address, new_page, ptl);
105420+#endif
105421+
105422 mem_cgroup_commit_charge(new_page, memcg, false);
105423 lru_cache_add_active_or_unevictable(new_page, vma);
105424 pte_unmap_unlock(pte, ptl);
105425@@ -2962,6 +3176,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
105426 return ret;
105427 }
105428 do_set_pte(vma, address, fault_page, pte, true, false);
105429+
105430+#ifdef CONFIG_PAX_SEGMEXEC
105431+ pax_mirror_file_pte(vma, address, fault_page, ptl);
105432+#endif
105433+
105434 pte_unmap_unlock(pte, ptl);
105435
105436 if (set_page_dirty(fault_page))
105437@@ -3185,6 +3404,12 @@ static int handle_pte_fault(struct mm_struct *mm,
105438 if (flags & FAULT_FLAG_WRITE)
105439 flush_tlb_fix_spurious_fault(vma, address);
105440 }
105441+
105442+#ifdef CONFIG_PAX_SEGMEXEC
105443+ pax_mirror_pte(vma, address, pte, pmd, ptl);
105444+ return 0;
105445+#endif
105446+
105447 unlock:
105448 pte_unmap_unlock(pte, ptl);
105449 return 0;
105450@@ -3204,9 +3429,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
105451 pmd_t *pmd;
105452 pte_t *pte;
105453
105454+#ifdef CONFIG_PAX_SEGMEXEC
105455+ struct vm_area_struct *vma_m;
105456+#endif
105457+
105458 if (unlikely(is_vm_hugetlb_page(vma)))
105459 return hugetlb_fault(mm, vma, address, flags);
105460
105461+#ifdef CONFIG_PAX_SEGMEXEC
105462+ vma_m = pax_find_mirror_vma(vma);
105463+ if (vma_m) {
105464+ unsigned long address_m;
105465+ pgd_t *pgd_m;
105466+ pud_t *pud_m;
105467+ pmd_t *pmd_m;
105468+
105469+ if (vma->vm_start > vma_m->vm_start) {
105470+ address_m = address;
105471+ address -= SEGMEXEC_TASK_SIZE;
105472+ vma = vma_m;
105473+ } else
105474+ address_m = address + SEGMEXEC_TASK_SIZE;
105475+
105476+ pgd_m = pgd_offset(mm, address_m);
105477+ pud_m = pud_alloc(mm, pgd_m, address_m);
105478+ if (!pud_m)
105479+ return VM_FAULT_OOM;
105480+ pmd_m = pmd_alloc(mm, pud_m, address_m);
105481+ if (!pmd_m)
105482+ return VM_FAULT_OOM;
105483+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
105484+ return VM_FAULT_OOM;
105485+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
105486+ }
105487+#endif
105488+
105489 pgd = pgd_offset(mm, address);
105490 pud = pud_alloc(mm, pgd, address);
105491 if (!pud)
105492@@ -3341,6 +3598,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
105493 spin_unlock(&mm->page_table_lock);
105494 return 0;
105495 }
105496+
105497+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
105498+{
105499+ pud_t *new = pud_alloc_one(mm, address);
105500+ if (!new)
105501+ return -ENOMEM;
105502+
105503+ smp_wmb(); /* See comment in __pte_alloc */
105504+
105505+ spin_lock(&mm->page_table_lock);
105506+ if (pgd_present(*pgd)) /* Another has populated it */
105507+ pud_free(mm, new);
105508+ else
105509+ pgd_populate_kernel(mm, pgd, new);
105510+ spin_unlock(&mm->page_table_lock);
105511+ return 0;
105512+}
105513 #endif /* __PAGETABLE_PUD_FOLDED */
105514
105515 #ifndef __PAGETABLE_PMD_FOLDED
105516@@ -3373,6 +3647,32 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
105517 spin_unlock(&mm->page_table_lock);
105518 return 0;
105519 }
105520+
105521+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
105522+{
105523+ pmd_t *new = pmd_alloc_one(mm, address);
105524+ if (!new)
105525+ return -ENOMEM;
105526+
105527+ smp_wmb(); /* See comment in __pte_alloc */
105528+
105529+ spin_lock(&mm->page_table_lock);
105530+#ifndef __ARCH_HAS_4LEVEL_HACK
105531+ if (!pud_present(*pud)) {
105532+ mm_inc_nr_pmds(mm);
105533+ pud_populate_kernel(mm, pud, new);
105534+ } else /* Another has populated it */
105535+ pmd_free(mm, new);
105536+#else
105537+ if (!pgd_present(*pud)) {
105538+ mm_inc_nr_pmds(mm);
105539+ pgd_populate_kernel(mm, pud, new);
105540+ } else /* Another has populated it */
105541+ pmd_free(mm, new);
105542+#endif /* __ARCH_HAS_4LEVEL_HACK */
105543+ spin_unlock(&mm->page_table_lock);
105544+ return 0;
105545+}
105546 #endif /* __PAGETABLE_PMD_FOLDED */
105547
105548 static int __follow_pte(struct mm_struct *mm, unsigned long address,
105549@@ -3482,8 +3782,8 @@ out:
105550 return ret;
105551 }
105552
105553-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
105554- void *buf, int len, int write)
105555+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
105556+ void *buf, size_t len, int write)
105557 {
105558 resource_size_t phys_addr;
105559 unsigned long prot = 0;
105560@@ -3509,8 +3809,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
105561 * Access another process' address space as given in mm. If non-NULL, use the
105562 * given task for page fault accounting.
105563 */
105564-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
105565- unsigned long addr, void *buf, int len, int write)
105566+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
105567+ unsigned long addr, void *buf, size_t len, int write)
105568 {
105569 struct vm_area_struct *vma;
105570 void *old_buf = buf;
105571@@ -3518,7 +3818,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
105572 down_read(&mm->mmap_sem);
105573 /* ignore errors, just check how much was successfully transferred */
105574 while (len) {
105575- int bytes, ret, offset;
105576+ ssize_t bytes, ret, offset;
105577 void *maddr;
105578 struct page *page = NULL;
105579
105580@@ -3579,8 +3879,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
105581 *
105582 * The caller must hold a reference on @mm.
105583 */
105584-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
105585- void *buf, int len, int write)
105586+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
105587+ void *buf, size_t len, int write)
105588 {
105589 return __access_remote_vm(NULL, mm, addr, buf, len, write);
105590 }
105591@@ -3590,11 +3890,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
105592 * Source/target buffer must be kernel space,
105593 * Do not walk the page table directly, use get_user_pages
105594 */
105595-int access_process_vm(struct task_struct *tsk, unsigned long addr,
105596- void *buf, int len, int write)
105597+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
105598+ void *buf, size_t len, int write)
105599 {
105600 struct mm_struct *mm;
105601- int ret;
105602+ ssize_t ret;
105603
105604 mm = get_task_mm(tsk);
105605 if (!mm)
105606diff --git a/mm/mempolicy.c b/mm/mempolicy.c
105607index 0f7d73b..737047f 100644
105608--- a/mm/mempolicy.c
105609+++ b/mm/mempolicy.c
105610@@ -703,6 +703,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
105611 unsigned long vmstart;
105612 unsigned long vmend;
105613
105614+#ifdef CONFIG_PAX_SEGMEXEC
105615+ struct vm_area_struct *vma_m;
105616+#endif
105617+
105618 vma = find_vma(mm, start);
105619 if (!vma || vma->vm_start > start)
105620 return -EFAULT;
105621@@ -746,6 +750,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
105622 err = vma_replace_policy(vma, new_pol);
105623 if (err)
105624 goto out;
105625+
105626+#ifdef CONFIG_PAX_SEGMEXEC
105627+ vma_m = pax_find_mirror_vma(vma);
105628+ if (vma_m) {
105629+ err = vma_replace_policy(vma_m, new_pol);
105630+ if (err)
105631+ goto out;
105632+ }
105633+#endif
105634+
105635 }
105636
105637 out:
105638@@ -1160,6 +1174,17 @@ static long do_mbind(unsigned long start, unsigned long len,
105639
105640 if (end < start)
105641 return -EINVAL;
105642+
105643+#ifdef CONFIG_PAX_SEGMEXEC
105644+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
105645+ if (end > SEGMEXEC_TASK_SIZE)
105646+ return -EINVAL;
105647+ } else
105648+#endif
105649+
105650+ if (end > TASK_SIZE)
105651+ return -EINVAL;
105652+
105653 if (end == start)
105654 return 0;
105655
105656@@ -1385,8 +1410,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
105657 */
105658 tcred = __task_cred(task);
105659 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
105660- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
105661- !capable(CAP_SYS_NICE)) {
105662+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
105663 rcu_read_unlock();
105664 err = -EPERM;
105665 goto out_put;
105666@@ -1417,6 +1441,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
105667 goto out;
105668 }
105669
105670+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
105671+ if (mm != current->mm &&
105672+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
105673+ mmput(mm);
105674+ err = -EPERM;
105675+ goto out;
105676+ }
105677+#endif
105678+
105679 err = do_migrate_pages(mm, old, new,
105680 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
105681
105682diff --git a/mm/migrate.c b/mm/migrate.c
105683index 85e0426..be49beb 100644
105684--- a/mm/migrate.c
105685+++ b/mm/migrate.c
105686@@ -1472,8 +1472,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
105687 */
105688 tcred = __task_cred(task);
105689 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
105690- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
105691- !capable(CAP_SYS_NICE)) {
105692+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
105693 rcu_read_unlock();
105694 err = -EPERM;
105695 goto out;
105696diff --git a/mm/mlock.c b/mm/mlock.c
105697index 8a54cd2..92f1747 100644
105698--- a/mm/mlock.c
105699+++ b/mm/mlock.c
105700@@ -14,6 +14,7 @@
105701 #include <linux/pagevec.h>
105702 #include <linux/mempolicy.h>
105703 #include <linux/syscalls.h>
105704+#include <linux/security.h>
105705 #include <linux/sched.h>
105706 #include <linux/export.h>
105707 #include <linux/rmap.h>
105708@@ -613,7 +614,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
105709 {
105710 unsigned long nstart, end, tmp;
105711 struct vm_area_struct * vma, * prev;
105712- int error;
105713+ int error = 0;
105714
105715 VM_BUG_ON(start & ~PAGE_MASK);
105716 VM_BUG_ON(len != PAGE_ALIGN(len));
105717@@ -622,6 +623,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
105718 return -EINVAL;
105719 if (end == start)
105720 return 0;
105721+ if (end > TASK_SIZE)
105722+ return -EINVAL;
105723+
105724 vma = find_vma(current->mm, start);
105725 if (!vma || vma->vm_start > start)
105726 return -ENOMEM;
105727@@ -633,6 +637,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
105728 for (nstart = start ; ; ) {
105729 vm_flags_t newflags;
105730
105731+#ifdef CONFIG_PAX_SEGMEXEC
105732+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
105733+ break;
105734+#endif
105735+
105736 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
105737
105738 newflags = vma->vm_flags & ~VM_LOCKED;
105739@@ -746,6 +755,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
105740 locked += current->mm->locked_vm;
105741
105742 /* check against resource limits */
105743+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
105744 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
105745 error = do_mlock(start, len, 1);
105746
105747@@ -783,6 +793,11 @@ static int do_mlockall(int flags)
105748 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
105749 vm_flags_t newflags;
105750
105751+#ifdef CONFIG_PAX_SEGMEXEC
105752+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
105753+ break;
105754+#endif
105755+
105756 newflags = vma->vm_flags & ~VM_LOCKED;
105757 if (flags & MCL_CURRENT)
105758 newflags |= VM_LOCKED;
105759@@ -814,8 +829,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
105760 lock_limit >>= PAGE_SHIFT;
105761
105762 ret = -ENOMEM;
105763+
105764+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
105765+
105766 down_write(&current->mm->mmap_sem);
105767-
105768 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
105769 capable(CAP_IPC_LOCK))
105770 ret = do_mlockall(flags);
105771diff --git a/mm/mm_init.c b/mm/mm_init.c
105772index 5f420f7..dd42fb1b 100644
105773--- a/mm/mm_init.c
105774+++ b/mm/mm_init.c
105775@@ -177,7 +177,7 @@ static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
105776 return NOTIFY_OK;
105777 }
105778
105779-static struct notifier_block compute_batch_nb __meminitdata = {
105780+static struct notifier_block compute_batch_nb __meminitconst = {
105781 .notifier_call = mm_compute_batch_notifier,
105782 .priority = IPC_CALLBACK_PRI, /* use lowest priority */
105783 };
105784diff --git a/mm/mmap.c b/mm/mmap.c
105785index 9ec50a3..0476e2d 100644
105786--- a/mm/mmap.c
105787+++ b/mm/mmap.c
105788@@ -41,6 +41,7 @@
105789 #include <linux/notifier.h>
105790 #include <linux/memory.h>
105791 #include <linux/printk.h>
105792+#include <linux/random.h>
105793
105794 #include <asm/uaccess.h>
105795 #include <asm/cacheflush.h>
105796@@ -57,6 +58,16 @@
105797 #define arch_rebalance_pgtables(addr, len) (addr)
105798 #endif
105799
105800+static inline void verify_mm_writelocked(struct mm_struct *mm)
105801+{
105802+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
105803+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
105804+ up_read(&mm->mmap_sem);
105805+ BUG();
105806+ }
105807+#endif
105808+}
105809+
105810 static void unmap_region(struct mm_struct *mm,
105811 struct vm_area_struct *vma, struct vm_area_struct *prev,
105812 unsigned long start, unsigned long end);
105813@@ -76,16 +87,25 @@ static void unmap_region(struct mm_struct *mm,
105814 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
105815 *
105816 */
105817-pgprot_t protection_map[16] = {
105818+pgprot_t protection_map[16] __read_only = {
105819 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
105820 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
105821 };
105822
105823-pgprot_t vm_get_page_prot(unsigned long vm_flags)
105824+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
105825 {
105826- return __pgprot(pgprot_val(protection_map[vm_flags &
105827+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
105828 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
105829 pgprot_val(arch_vm_get_page_prot(vm_flags)));
105830+
105831+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
105832+ if (!(__supported_pte_mask & _PAGE_NX) &&
105833+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
105834+ (vm_flags & (VM_READ | VM_WRITE)))
105835+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
105836+#endif
105837+
105838+ return prot;
105839 }
105840 EXPORT_SYMBOL(vm_get_page_prot);
105841
105842@@ -114,6 +134,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
105843 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
105844 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
105845 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
105846+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
105847 /*
105848 * Make sure vm_committed_as in one cacheline and not cacheline shared with
105849 * other variables. It can be updated by several CPUs frequently.
105850@@ -271,6 +292,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
105851 struct vm_area_struct *next = vma->vm_next;
105852
105853 might_sleep();
105854+ BUG_ON(vma->vm_mirror);
105855 if (vma->vm_ops && vma->vm_ops->close)
105856 vma->vm_ops->close(vma);
105857 if (vma->vm_file)
105858@@ -284,6 +306,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len);
105859
105860 SYSCALL_DEFINE1(brk, unsigned long, brk)
105861 {
105862+ unsigned long rlim;
105863 unsigned long retval;
105864 unsigned long newbrk, oldbrk;
105865 struct mm_struct *mm = current->mm;
105866@@ -314,7 +337,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
105867 * segment grow beyond its set limit the in case where the limit is
105868 * not page aligned -Ram Gupta
105869 */
105870- if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
105871+ rlim = rlimit(RLIMIT_DATA);
105872+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
105873+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
105874+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
105875+ rlim = 4096 * PAGE_SIZE;
105876+#endif
105877+ if (check_data_rlimit(rlim, brk, mm->start_brk,
105878 mm->end_data, mm->start_data))
105879 goto out;
105880
105881@@ -967,6 +996,12 @@ static int
105882 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
105883 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
105884 {
105885+
105886+#ifdef CONFIG_PAX_SEGMEXEC
105887+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
105888+ return 0;
105889+#endif
105890+
105891 if (is_mergeable_vma(vma, file, vm_flags) &&
105892 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
105893 if (vma->vm_pgoff == vm_pgoff)
105894@@ -986,6 +1021,12 @@ static int
105895 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
105896 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
105897 {
105898+
105899+#ifdef CONFIG_PAX_SEGMEXEC
105900+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
105901+ return 0;
105902+#endif
105903+
105904 if (is_mergeable_vma(vma, file, vm_flags) &&
105905 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
105906 pgoff_t vm_pglen;
105907@@ -1035,6 +1076,13 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
105908 struct vm_area_struct *area, *next;
105909 int err;
105910
105911+#ifdef CONFIG_PAX_SEGMEXEC
105912+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
105913+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
105914+
105915+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
105916+#endif
105917+
105918 /*
105919 * We later require that vma->vm_flags == vm_flags,
105920 * so this tests vma->vm_flags & VM_SPECIAL, too.
105921@@ -1050,6 +1098,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
105922 if (next && next->vm_end == end) /* cases 6, 7, 8 */
105923 next = next->vm_next;
105924
105925+#ifdef CONFIG_PAX_SEGMEXEC
105926+ if (prev)
105927+ prev_m = pax_find_mirror_vma(prev);
105928+ if (area)
105929+ area_m = pax_find_mirror_vma(area);
105930+ if (next)
105931+ next_m = pax_find_mirror_vma(next);
105932+#endif
105933+
105934 /*
105935 * Can it merge with the predecessor?
105936 */
105937@@ -1069,9 +1126,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
105938 /* cases 1, 6 */
105939 err = vma_adjust(prev, prev->vm_start,
105940 next->vm_end, prev->vm_pgoff, NULL);
105941- } else /* cases 2, 5, 7 */
105942+
105943+#ifdef CONFIG_PAX_SEGMEXEC
105944+ if (!err && prev_m)
105945+ err = vma_adjust(prev_m, prev_m->vm_start,
105946+ next_m->vm_end, prev_m->vm_pgoff, NULL);
105947+#endif
105948+
105949+ } else { /* cases 2, 5, 7 */
105950 err = vma_adjust(prev, prev->vm_start,
105951 end, prev->vm_pgoff, NULL);
105952+
105953+#ifdef CONFIG_PAX_SEGMEXEC
105954+ if (!err && prev_m)
105955+ err = vma_adjust(prev_m, prev_m->vm_start,
105956+ end_m, prev_m->vm_pgoff, NULL);
105957+#endif
105958+
105959+ }
105960 if (err)
105961 return NULL;
105962 khugepaged_enter_vma_merge(prev, vm_flags);
105963@@ -1085,12 +1157,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
105964 mpol_equal(policy, vma_policy(next)) &&
105965 can_vma_merge_before(next, vm_flags,
105966 anon_vma, file, pgoff+pglen)) {
105967- if (prev && addr < prev->vm_end) /* case 4 */
105968+ if (prev && addr < prev->vm_end) { /* case 4 */
105969 err = vma_adjust(prev, prev->vm_start,
105970 addr, prev->vm_pgoff, NULL);
105971- else /* cases 3, 8 */
105972+
105973+#ifdef CONFIG_PAX_SEGMEXEC
105974+ if (!err && prev_m)
105975+ err = vma_adjust(prev_m, prev_m->vm_start,
105976+ addr_m, prev_m->vm_pgoff, NULL);
105977+#endif
105978+
105979+ } else { /* cases 3, 8 */
105980 err = vma_adjust(area, addr, next->vm_end,
105981 next->vm_pgoff - pglen, NULL);
105982+
105983+#ifdef CONFIG_PAX_SEGMEXEC
105984+ if (!err && area_m)
105985+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
105986+ next_m->vm_pgoff - pglen, NULL);
105987+#endif
105988+
105989+ }
105990 if (err)
105991 return NULL;
105992 khugepaged_enter_vma_merge(area, vm_flags);
105993@@ -1199,8 +1286,10 @@ none:
105994 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
105995 struct file *file, long pages)
105996 {
105997- const unsigned long stack_flags
105998- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
105999+
106000+#ifdef CONFIG_PAX_RANDMMAP
106001+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
106002+#endif
106003
106004 mm->total_vm += pages;
106005
106006@@ -1208,7 +1297,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
106007 mm->shared_vm += pages;
106008 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
106009 mm->exec_vm += pages;
106010- } else if (flags & stack_flags)
106011+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
106012 mm->stack_vm += pages;
106013 }
106014 #endif /* CONFIG_PROC_FS */
106015@@ -1238,6 +1327,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
106016 locked += mm->locked_vm;
106017 lock_limit = rlimit(RLIMIT_MEMLOCK);
106018 lock_limit >>= PAGE_SHIFT;
106019+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
106020 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
106021 return -EAGAIN;
106022 }
106023@@ -1264,7 +1354,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
106024 * (the exception is when the underlying filesystem is noexec
106025 * mounted, in which case we dont add PROT_EXEC.)
106026 */
106027- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
106028+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
106029 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
106030 prot |= PROT_EXEC;
106031
106032@@ -1290,7 +1380,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
106033 /* Obtain the address to map to. we verify (or select) it and ensure
106034 * that it represents a valid section of the address space.
106035 */
106036- addr = get_unmapped_area(file, addr, len, pgoff, flags);
106037+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
106038 if (addr & ~PAGE_MASK)
106039 return addr;
106040
106041@@ -1301,6 +1391,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
106042 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
106043 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
106044
106045+#ifdef CONFIG_PAX_MPROTECT
106046+ if (mm->pax_flags & MF_PAX_MPROTECT) {
106047+
106048+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
106049+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
106050+ mm->binfmt->handle_mmap)
106051+ mm->binfmt->handle_mmap(file);
106052+#endif
106053+
106054+#ifndef CONFIG_PAX_MPROTECT_COMPAT
106055+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
106056+ gr_log_rwxmmap(file);
106057+
106058+#ifdef CONFIG_PAX_EMUPLT
106059+ vm_flags &= ~VM_EXEC;
106060+#else
106061+ return -EPERM;
106062+#endif
106063+
106064+ }
106065+
106066+ if (!(vm_flags & VM_EXEC))
106067+ vm_flags &= ~VM_MAYEXEC;
106068+#else
106069+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
106070+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
106071+#endif
106072+ else
106073+ vm_flags &= ~VM_MAYWRITE;
106074+ }
106075+#endif
106076+
106077+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
106078+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
106079+ vm_flags &= ~VM_PAGEEXEC;
106080+#endif
106081+
106082 if (flags & MAP_LOCKED)
106083 if (!can_do_mlock())
106084 return -EPERM;
106085@@ -1388,6 +1515,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
106086 vm_flags |= VM_NORESERVE;
106087 }
106088
106089+ if (!gr_acl_handle_mmap(file, prot))
106090+ return -EACCES;
106091+
106092 addr = mmap_region(file, addr, len, vm_flags, pgoff);
106093 if (!IS_ERR_VALUE(addr) &&
106094 ((vm_flags & VM_LOCKED) ||
106095@@ -1481,7 +1611,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
106096 vm_flags_t vm_flags = vma->vm_flags;
106097
106098 /* If it was private or non-writable, the write bit is already clear */
106099- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
106100+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
106101 return 0;
106102
106103 /* The backer wishes to know when pages are first written to? */
106104@@ -1532,7 +1662,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
106105 struct rb_node **rb_link, *rb_parent;
106106 unsigned long charged = 0;
106107
106108+#ifdef CONFIG_PAX_SEGMEXEC
106109+ struct vm_area_struct *vma_m = NULL;
106110+#endif
106111+
106112+ /*
106113+ * mm->mmap_sem is required to protect against another thread
106114+ * changing the mappings in case we sleep.
106115+ */
106116+ verify_mm_writelocked(mm);
106117+
106118 /* Check against address space limit. */
106119+
106120+#ifdef CONFIG_PAX_RANDMMAP
106121+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
106122+#endif
106123+
106124 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
106125 unsigned long nr_pages;
106126
106127@@ -1551,11 +1696,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
106128
106129 /* Clear old maps */
106130 error = -ENOMEM;
106131-munmap_back:
106132 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
106133 if (do_munmap(mm, addr, len))
106134 return -ENOMEM;
106135- goto munmap_back;
106136+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
106137 }
106138
106139 /*
106140@@ -1586,6 +1730,16 @@ munmap_back:
106141 goto unacct_error;
106142 }
106143
106144+#ifdef CONFIG_PAX_SEGMEXEC
106145+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
106146+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
106147+ if (!vma_m) {
106148+ error = -ENOMEM;
106149+ goto free_vma;
106150+ }
106151+ }
106152+#endif
106153+
106154 vma->vm_mm = mm;
106155 vma->vm_start = addr;
106156 vma->vm_end = addr + len;
106157@@ -1616,6 +1770,13 @@ munmap_back:
106158 if (error)
106159 goto unmap_and_free_vma;
106160
106161+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
106162+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
106163+ vma->vm_flags |= VM_PAGEEXEC;
106164+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
106165+ }
106166+#endif
106167+
106168 /* Can addr have changed??
106169 *
106170 * Answer: Yes, several device drivers can do it in their
106171@@ -1634,6 +1795,12 @@ munmap_back:
106172 }
106173
106174 vma_link(mm, vma, prev, rb_link, rb_parent);
106175+
106176+#ifdef CONFIG_PAX_SEGMEXEC
106177+ if (vma_m)
106178+ BUG_ON(pax_mirror_vma(vma_m, vma));
106179+#endif
106180+
106181 /* Once vma denies write, undo our temporary denial count */
106182 if (file) {
106183 if (vm_flags & VM_SHARED)
106184@@ -1646,6 +1813,7 @@ out:
106185 perf_event_mmap(vma);
106186
106187 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
106188+ track_exec_limit(mm, addr, addr + len, vm_flags);
106189 if (vm_flags & VM_LOCKED) {
106190 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
106191 vma == get_gate_vma(current->mm)))
106192@@ -1683,6 +1851,12 @@ allow_write_and_free_vma:
106193 if (vm_flags & VM_DENYWRITE)
106194 allow_write_access(file);
106195 free_vma:
106196+
106197+#ifdef CONFIG_PAX_SEGMEXEC
106198+ if (vma_m)
106199+ kmem_cache_free(vm_area_cachep, vma_m);
106200+#endif
106201+
106202 kmem_cache_free(vm_area_cachep, vma);
106203 unacct_error:
106204 if (charged)
106205@@ -1690,7 +1864,63 @@ unacct_error:
106206 return error;
106207 }
106208
106209-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
106210+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
106211+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
106212+{
106213+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
106214+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
106215+
106216+ return 0;
106217+}
106218+#endif
106219+
106220+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
106221+{
106222+ if (!vma) {
106223+#ifdef CONFIG_STACK_GROWSUP
106224+ if (addr > sysctl_heap_stack_gap)
106225+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
106226+ else
106227+ vma = find_vma(current->mm, 0);
106228+ if (vma && (vma->vm_flags & VM_GROWSUP))
106229+ return false;
106230+#endif
106231+ return true;
106232+ }
106233+
106234+ if (addr + len > vma->vm_start)
106235+ return false;
106236+
106237+ if (vma->vm_flags & VM_GROWSDOWN)
106238+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
106239+#ifdef CONFIG_STACK_GROWSUP
106240+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
106241+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
106242+#endif
106243+ else if (offset)
106244+ return offset <= vma->vm_start - addr - len;
106245+
106246+ return true;
106247+}
106248+
106249+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
106250+{
106251+ if (vma->vm_start < len)
106252+ return -ENOMEM;
106253+
106254+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
106255+ if (offset <= vma->vm_start - len)
106256+ return vma->vm_start - len - offset;
106257+ else
106258+ return -ENOMEM;
106259+ }
106260+
106261+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
106262+ return vma->vm_start - len - sysctl_heap_stack_gap;
106263+ return -ENOMEM;
106264+}
106265+
106266+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
106267 {
106268 /*
106269 * We implement the search by looking for an rbtree node that
106270@@ -1738,11 +1968,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
106271 }
106272 }
106273
106274- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
106275+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
106276 check_current:
106277 /* Check if current node has a suitable gap */
106278 if (gap_start > high_limit)
106279 return -ENOMEM;
106280+
106281+ if (gap_end - gap_start > info->threadstack_offset)
106282+ gap_start += info->threadstack_offset;
106283+ else
106284+ gap_start = gap_end;
106285+
106286+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
106287+ if (gap_end - gap_start > sysctl_heap_stack_gap)
106288+ gap_start += sysctl_heap_stack_gap;
106289+ else
106290+ gap_start = gap_end;
106291+ }
106292+ if (vma->vm_flags & VM_GROWSDOWN) {
106293+ if (gap_end - gap_start > sysctl_heap_stack_gap)
106294+ gap_end -= sysctl_heap_stack_gap;
106295+ else
106296+ gap_end = gap_start;
106297+ }
106298 if (gap_end >= low_limit && gap_end - gap_start >= length)
106299 goto found;
106300
106301@@ -1792,7 +2040,7 @@ found:
106302 return gap_start;
106303 }
106304
106305-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
106306+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
106307 {
106308 struct mm_struct *mm = current->mm;
106309 struct vm_area_struct *vma;
106310@@ -1846,6 +2094,24 @@ check_current:
106311 gap_end = vma->vm_start;
106312 if (gap_end < low_limit)
106313 return -ENOMEM;
106314+
106315+ if (gap_end - gap_start > info->threadstack_offset)
106316+ gap_end -= info->threadstack_offset;
106317+ else
106318+ gap_end = gap_start;
106319+
106320+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
106321+ if (gap_end - gap_start > sysctl_heap_stack_gap)
106322+ gap_start += sysctl_heap_stack_gap;
106323+ else
106324+ gap_start = gap_end;
106325+ }
106326+ if (vma->vm_flags & VM_GROWSDOWN) {
106327+ if (gap_end - gap_start > sysctl_heap_stack_gap)
106328+ gap_end -= sysctl_heap_stack_gap;
106329+ else
106330+ gap_end = gap_start;
106331+ }
106332 if (gap_start <= high_limit && gap_end - gap_start >= length)
106333 goto found;
106334
106335@@ -1909,6 +2175,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
106336 struct mm_struct *mm = current->mm;
106337 struct vm_area_struct *vma;
106338 struct vm_unmapped_area_info info;
106339+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
106340
106341 if (len > TASK_SIZE - mmap_min_addr)
106342 return -ENOMEM;
106343@@ -1916,11 +2183,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
106344 if (flags & MAP_FIXED)
106345 return addr;
106346
106347+#ifdef CONFIG_PAX_RANDMMAP
106348+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
106349+#endif
106350+
106351 if (addr) {
106352 addr = PAGE_ALIGN(addr);
106353 vma = find_vma(mm, addr);
106354 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
106355- (!vma || addr + len <= vma->vm_start))
106356+ check_heap_stack_gap(vma, addr, len, offset))
106357 return addr;
106358 }
106359
106360@@ -1929,6 +2200,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
106361 info.low_limit = mm->mmap_base;
106362 info.high_limit = TASK_SIZE;
106363 info.align_mask = 0;
106364+ info.threadstack_offset = offset;
106365 return vm_unmapped_area(&info);
106366 }
106367 #endif
106368@@ -1947,6 +2219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
106369 struct mm_struct *mm = current->mm;
106370 unsigned long addr = addr0;
106371 struct vm_unmapped_area_info info;
106372+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
106373
106374 /* requested length too big for entire address space */
106375 if (len > TASK_SIZE - mmap_min_addr)
106376@@ -1955,12 +2228,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
106377 if (flags & MAP_FIXED)
106378 return addr;
106379
106380+#ifdef CONFIG_PAX_RANDMMAP
106381+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
106382+#endif
106383+
106384 /* requesting a specific address */
106385 if (addr) {
106386 addr = PAGE_ALIGN(addr);
106387 vma = find_vma(mm, addr);
106388 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
106389- (!vma || addr + len <= vma->vm_start))
106390+ check_heap_stack_gap(vma, addr, len, offset))
106391 return addr;
106392 }
106393
106394@@ -1969,6 +2246,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
106395 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
106396 info.high_limit = mm->mmap_base;
106397 info.align_mask = 0;
106398+ info.threadstack_offset = offset;
106399 addr = vm_unmapped_area(&info);
106400
106401 /*
106402@@ -1981,6 +2259,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
106403 VM_BUG_ON(addr != -ENOMEM);
106404 info.flags = 0;
106405 info.low_limit = TASK_UNMAPPED_BASE;
106406+
106407+#ifdef CONFIG_PAX_RANDMMAP
106408+ if (mm->pax_flags & MF_PAX_RANDMMAP)
106409+ info.low_limit += mm->delta_mmap;
106410+#endif
106411+
106412 info.high_limit = TASK_SIZE;
106413 addr = vm_unmapped_area(&info);
106414 }
106415@@ -2081,6 +2365,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
106416 return vma;
106417 }
106418
106419+#ifdef CONFIG_PAX_SEGMEXEC
106420+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
106421+{
106422+ struct vm_area_struct *vma_m;
106423+
106424+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
106425+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
106426+ BUG_ON(vma->vm_mirror);
106427+ return NULL;
106428+ }
106429+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
106430+ vma_m = vma->vm_mirror;
106431+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
106432+ BUG_ON(vma->vm_file != vma_m->vm_file);
106433+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
106434+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
106435+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
106436+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
106437+ return vma_m;
106438+}
106439+#endif
106440+
106441 /*
106442 * Verify that the stack growth is acceptable and
106443 * update accounting. This is shared with both the
106444@@ -2098,8 +2404,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
106445
106446 /* Stack limit test */
106447 actual_size = size;
106448- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
106449- actual_size -= PAGE_SIZE;
106450+ gr_learn_resource(current, RLIMIT_STACK, actual_size, 1);
106451 if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
106452 return -ENOMEM;
106453
106454@@ -2110,6 +2415,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
106455 locked = mm->locked_vm + grow;
106456 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
106457 limit >>= PAGE_SHIFT;
106458+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
106459 if (locked > limit && !capable(CAP_IPC_LOCK))
106460 return -ENOMEM;
106461 }
106462@@ -2139,37 +2445,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
106463 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
106464 * vma is the last one with address > vma->vm_end. Have to extend vma.
106465 */
106466+#ifndef CONFIG_IA64
106467+static
106468+#endif
106469 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
106470 {
106471 int error;
106472+ bool locknext;
106473
106474 if (!(vma->vm_flags & VM_GROWSUP))
106475 return -EFAULT;
106476
106477+ /* Also guard against wrapping around to address 0. */
106478+ if (address < PAGE_ALIGN(address+1))
106479+ address = PAGE_ALIGN(address+1);
106480+ else
106481+ return -ENOMEM;
106482+
106483 /*
106484 * We must make sure the anon_vma is allocated
106485 * so that the anon_vma locking is not a noop.
106486 */
106487 if (unlikely(anon_vma_prepare(vma)))
106488 return -ENOMEM;
106489+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
106490+ if (locknext && anon_vma_prepare(vma->vm_next))
106491+ return -ENOMEM;
106492 vma_lock_anon_vma(vma);
106493+ if (locknext)
106494+ vma_lock_anon_vma(vma->vm_next);
106495
106496 /*
106497 * vma->vm_start/vm_end cannot change under us because the caller
106498 * is required to hold the mmap_sem in read mode. We need the
106499- * anon_vma lock to serialize against concurrent expand_stacks.
106500- * Also guard against wrapping around to address 0.
106501+ * anon_vma locks to serialize against concurrent expand_stacks
106502+ * and expand_upwards.
106503 */
106504- if (address < PAGE_ALIGN(address+4))
106505- address = PAGE_ALIGN(address+4);
106506- else {
106507- vma_unlock_anon_vma(vma);
106508- return -ENOMEM;
106509- }
106510 error = 0;
106511
106512 /* Somebody else might have raced and expanded it already */
106513- if (address > vma->vm_end) {
106514+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
106515+ error = -ENOMEM;
106516+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
106517 unsigned long size, grow;
106518
106519 size = address - vma->vm_start;
106520@@ -2204,6 +2521,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
106521 }
106522 }
106523 }
106524+ if (locknext)
106525+ vma_unlock_anon_vma(vma->vm_next);
106526 vma_unlock_anon_vma(vma);
106527 khugepaged_enter_vma_merge(vma, vma->vm_flags);
106528 validate_mm(vma->vm_mm);
106529@@ -2218,6 +2537,8 @@ int expand_downwards(struct vm_area_struct *vma,
106530 unsigned long address)
106531 {
106532 int error;
106533+ bool lockprev = false;
106534+ struct vm_area_struct *prev;
106535
106536 /*
106537 * We must make sure the anon_vma is allocated
106538@@ -2231,6 +2552,15 @@ int expand_downwards(struct vm_area_struct *vma,
106539 if (error)
106540 return error;
106541
106542+ prev = vma->vm_prev;
106543+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
106544+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
106545+#endif
106546+ if (lockprev && anon_vma_prepare(prev))
106547+ return -ENOMEM;
106548+ if (lockprev)
106549+ vma_lock_anon_vma(prev);
106550+
106551 vma_lock_anon_vma(vma);
106552
106553 /*
106554@@ -2240,9 +2570,17 @@ int expand_downwards(struct vm_area_struct *vma,
106555 */
106556
106557 /* Somebody else might have raced and expanded it already */
106558- if (address < vma->vm_start) {
106559+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
106560+ error = -ENOMEM;
106561+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
106562 unsigned long size, grow;
106563
106564+#ifdef CONFIG_PAX_SEGMEXEC
106565+ struct vm_area_struct *vma_m;
106566+
106567+ vma_m = pax_find_mirror_vma(vma);
106568+#endif
106569+
106570 size = vma->vm_end - address;
106571 grow = (vma->vm_start - address) >> PAGE_SHIFT;
106572
106573@@ -2267,13 +2605,27 @@ int expand_downwards(struct vm_area_struct *vma,
106574 vma->vm_pgoff -= grow;
106575 anon_vma_interval_tree_post_update_vma(vma);
106576 vma_gap_update(vma);
106577+
106578+#ifdef CONFIG_PAX_SEGMEXEC
106579+ if (vma_m) {
106580+ anon_vma_interval_tree_pre_update_vma(vma_m);
106581+ vma_m->vm_start -= grow << PAGE_SHIFT;
106582+ vma_m->vm_pgoff -= grow;
106583+ anon_vma_interval_tree_post_update_vma(vma_m);
106584+ vma_gap_update(vma_m);
106585+ }
106586+#endif
106587+
106588 spin_unlock(&vma->vm_mm->page_table_lock);
106589
106590+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
106591 perf_event_mmap(vma);
106592 }
106593 }
106594 }
106595 vma_unlock_anon_vma(vma);
106596+ if (lockprev)
106597+ vma_unlock_anon_vma(prev);
106598 khugepaged_enter_vma_merge(vma, vma->vm_flags);
106599 validate_mm(vma->vm_mm);
106600 return error;
106601@@ -2373,6 +2725,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
106602 do {
106603 long nrpages = vma_pages(vma);
106604
106605+#ifdef CONFIG_PAX_SEGMEXEC
106606+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
106607+ vma = remove_vma(vma);
106608+ continue;
106609+ }
106610+#endif
106611+
106612 if (vma->vm_flags & VM_ACCOUNT)
106613 nr_accounted += nrpages;
106614 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
106615@@ -2417,6 +2776,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
106616 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
106617 vma->vm_prev = NULL;
106618 do {
106619+
106620+#ifdef CONFIG_PAX_SEGMEXEC
106621+ if (vma->vm_mirror) {
106622+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
106623+ vma->vm_mirror->vm_mirror = NULL;
106624+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
106625+ vma->vm_mirror = NULL;
106626+ }
106627+#endif
106628+
106629 vma_rb_erase(vma, &mm->mm_rb);
106630 mm->map_count--;
106631 tail_vma = vma;
106632@@ -2444,14 +2813,33 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
106633 struct vm_area_struct *new;
106634 int err = -ENOMEM;
106635
106636+#ifdef CONFIG_PAX_SEGMEXEC
106637+ struct vm_area_struct *vma_m, *new_m = NULL;
106638+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
106639+#endif
106640+
106641 if (is_vm_hugetlb_page(vma) && (addr &
106642 ~(huge_page_mask(hstate_vma(vma)))))
106643 return -EINVAL;
106644
106645+#ifdef CONFIG_PAX_SEGMEXEC
106646+ vma_m = pax_find_mirror_vma(vma);
106647+#endif
106648+
106649 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
106650 if (!new)
106651 goto out_err;
106652
106653+#ifdef CONFIG_PAX_SEGMEXEC
106654+ if (vma_m) {
106655+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
106656+ if (!new_m) {
106657+ kmem_cache_free(vm_area_cachep, new);
106658+ goto out_err;
106659+ }
106660+ }
106661+#endif
106662+
106663 /* most fields are the same, copy all, and then fixup */
106664 *new = *vma;
106665
106666@@ -2464,6 +2852,22 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
106667 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
106668 }
106669
106670+#ifdef CONFIG_PAX_SEGMEXEC
106671+ if (vma_m) {
106672+ *new_m = *vma_m;
106673+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
106674+ new_m->vm_mirror = new;
106675+ new->vm_mirror = new_m;
106676+
106677+ if (new_below)
106678+ new_m->vm_end = addr_m;
106679+ else {
106680+ new_m->vm_start = addr_m;
106681+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
106682+ }
106683+ }
106684+#endif
106685+
106686 err = vma_dup_policy(vma, new);
106687 if (err)
106688 goto out_free_vma;
106689@@ -2484,6 +2888,38 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
106690 else
106691 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
106692
106693+#ifdef CONFIG_PAX_SEGMEXEC
106694+ if (!err && vma_m) {
106695+ struct mempolicy *pol = vma_policy(new);
106696+
106697+ if (anon_vma_clone(new_m, vma_m))
106698+ goto out_free_mpol;
106699+
106700+ mpol_get(pol);
106701+ set_vma_policy(new_m, pol);
106702+
106703+ if (new_m->vm_file)
106704+ get_file(new_m->vm_file);
106705+
106706+ if (new_m->vm_ops && new_m->vm_ops->open)
106707+ new_m->vm_ops->open(new_m);
106708+
106709+ if (new_below)
106710+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
106711+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
106712+ else
106713+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
106714+
106715+ if (err) {
106716+ if (new_m->vm_ops && new_m->vm_ops->close)
106717+ new_m->vm_ops->close(new_m);
106718+ if (new_m->vm_file)
106719+ fput(new_m->vm_file);
106720+ mpol_put(pol);
106721+ }
106722+ }
106723+#endif
106724+
106725 /* Success. */
106726 if (!err)
106727 return 0;
106728@@ -2493,10 +2929,18 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
106729 new->vm_ops->close(new);
106730 if (new->vm_file)
106731 fput(new->vm_file);
106732- unlink_anon_vmas(new);
106733 out_free_mpol:
106734 mpol_put(vma_policy(new));
106735 out_free_vma:
106736+
106737+#ifdef CONFIG_PAX_SEGMEXEC
106738+ if (new_m) {
106739+ unlink_anon_vmas(new_m);
106740+ kmem_cache_free(vm_area_cachep, new_m);
106741+ }
106742+#endif
106743+
106744+ unlink_anon_vmas(new);
106745 kmem_cache_free(vm_area_cachep, new);
106746 out_err:
106747 return err;
106748@@ -2509,6 +2953,15 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
106749 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
106750 unsigned long addr, int new_below)
106751 {
106752+
106753+#ifdef CONFIG_PAX_SEGMEXEC
106754+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
106755+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
106756+ if (mm->map_count >= sysctl_max_map_count-1)
106757+ return -ENOMEM;
106758+ } else
106759+#endif
106760+
106761 if (mm->map_count >= sysctl_max_map_count)
106762 return -ENOMEM;
106763
106764@@ -2520,11 +2973,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
106765 * work. This now handles partial unmappings.
106766 * Jeremy Fitzhardinge <jeremy@goop.org>
106767 */
106768+#ifdef CONFIG_PAX_SEGMEXEC
106769 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
106770 {
106771+ int ret = __do_munmap(mm, start, len);
106772+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
106773+ return ret;
106774+
106775+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
106776+}
106777+
106778+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
106779+#else
106780+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
106781+#endif
106782+{
106783 unsigned long end;
106784 struct vm_area_struct *vma, *prev, *last;
106785
106786+ /*
106787+ * mm->mmap_sem is required to protect against another thread
106788+ * changing the mappings in case we sleep.
106789+ */
106790+ verify_mm_writelocked(mm);
106791+
106792 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
106793 return -EINVAL;
106794
106795@@ -2602,6 +3074,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
106796 /* Fix up all other VM information */
106797 remove_vma_list(mm, vma);
106798
106799+ track_exec_limit(mm, start, end, 0UL);
106800+
106801 return 0;
106802 }
106803
106804@@ -2610,6 +3084,13 @@ int vm_munmap(unsigned long start, size_t len)
106805 int ret;
106806 struct mm_struct *mm = current->mm;
106807
106808+
106809+#ifdef CONFIG_PAX_SEGMEXEC
106810+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
106811+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
106812+ return -EINVAL;
106813+#endif
106814+
106815 down_write(&mm->mmap_sem);
106816 ret = do_munmap(mm, start, len);
106817 up_write(&mm->mmap_sem);
106818@@ -2656,6 +3137,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
106819 down_write(&mm->mmap_sem);
106820 vma = find_vma(mm, start);
106821
106822+#ifdef CONFIG_PAX_SEGMEXEC
106823+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
106824+ goto out;
106825+#endif
106826+
106827 if (!vma || !(vma->vm_flags & VM_SHARED))
106828 goto out;
106829
106830@@ -2692,16 +3178,6 @@ out:
106831 return ret;
106832 }
106833
106834-static inline void verify_mm_writelocked(struct mm_struct *mm)
106835-{
106836-#ifdef CONFIG_DEBUG_VM
106837- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
106838- WARN_ON(1);
106839- up_read(&mm->mmap_sem);
106840- }
106841-#endif
106842-}
106843-
106844 /*
106845 * this is really a simplified "do_mmap". it only handles
106846 * anonymous maps. eventually we may be able to do some
106847@@ -2715,6 +3191,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
106848 struct rb_node **rb_link, *rb_parent;
106849 pgoff_t pgoff = addr >> PAGE_SHIFT;
106850 int error;
106851+ unsigned long charged;
106852
106853 len = PAGE_ALIGN(len);
106854 if (!len)
106855@@ -2722,10 +3199,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
106856
106857 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
106858
106859+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
106860+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
106861+ flags &= ~VM_EXEC;
106862+
106863+#ifdef CONFIG_PAX_MPROTECT
106864+ if (mm->pax_flags & MF_PAX_MPROTECT)
106865+ flags &= ~VM_MAYEXEC;
106866+#endif
106867+
106868+ }
106869+#endif
106870+
106871 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
106872 if (error & ~PAGE_MASK)
106873 return error;
106874
106875+ charged = len >> PAGE_SHIFT;
106876+
106877 error = mlock_future_check(mm, mm->def_flags, len);
106878 if (error)
106879 return error;
106880@@ -2739,21 +3230,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
106881 /*
106882 * Clear old maps. this also does some error checking for us
106883 */
106884- munmap_back:
106885 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
106886 if (do_munmap(mm, addr, len))
106887 return -ENOMEM;
106888- goto munmap_back;
106889+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
106890 }
106891
106892 /* Check against address space limits *after* clearing old maps... */
106893- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
106894+ if (!may_expand_vm(mm, charged))
106895 return -ENOMEM;
106896
106897 if (mm->map_count > sysctl_max_map_count)
106898 return -ENOMEM;
106899
106900- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
106901+ if (security_vm_enough_memory_mm(mm, charged))
106902 return -ENOMEM;
106903
106904 /* Can we just expand an old private anonymous mapping? */
106905@@ -2767,7 +3257,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
106906 */
106907 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
106908 if (!vma) {
106909- vm_unacct_memory(len >> PAGE_SHIFT);
106910+ vm_unacct_memory(charged);
106911 return -ENOMEM;
106912 }
106913
106914@@ -2781,10 +3271,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
106915 vma_link(mm, vma, prev, rb_link, rb_parent);
106916 out:
106917 perf_event_mmap(vma);
106918- mm->total_vm += len >> PAGE_SHIFT;
106919+ mm->total_vm += charged;
106920 if (flags & VM_LOCKED)
106921- mm->locked_vm += (len >> PAGE_SHIFT);
106922+ mm->locked_vm += charged;
106923 vma->vm_flags |= VM_SOFTDIRTY;
106924+ track_exec_limit(mm, addr, addr + len, flags);
106925 return addr;
106926 }
106927
106928@@ -2846,6 +3337,7 @@ void exit_mmap(struct mm_struct *mm)
106929 while (vma) {
106930 if (vma->vm_flags & VM_ACCOUNT)
106931 nr_accounted += vma_pages(vma);
106932+ vma->vm_mirror = NULL;
106933 vma = remove_vma(vma);
106934 }
106935 vm_unacct_memory(nr_accounted);
106936@@ -2860,6 +3352,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
106937 struct vm_area_struct *prev;
106938 struct rb_node **rb_link, *rb_parent;
106939
106940+#ifdef CONFIG_PAX_SEGMEXEC
106941+ struct vm_area_struct *vma_m = NULL;
106942+#endif
106943+
106944+ if (security_mmap_addr(vma->vm_start))
106945+ return -EPERM;
106946+
106947 /*
106948 * The vm_pgoff of a purely anonymous vma should be irrelevant
106949 * until its first write fault, when page's anon_vma and index
106950@@ -2883,7 +3382,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
106951 security_vm_enough_memory_mm(mm, vma_pages(vma)))
106952 return -ENOMEM;
106953
106954+#ifdef CONFIG_PAX_SEGMEXEC
106955+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
106956+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
106957+ if (!vma_m)
106958+ return -ENOMEM;
106959+ }
106960+#endif
106961+
106962 vma_link(mm, vma, prev, rb_link, rb_parent);
106963+
106964+#ifdef CONFIG_PAX_SEGMEXEC
106965+ if (vma_m)
106966+ BUG_ON(pax_mirror_vma(vma_m, vma));
106967+#endif
106968+
106969 return 0;
106970 }
106971
106972@@ -2902,6 +3415,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
106973 struct rb_node **rb_link, *rb_parent;
106974 bool faulted_in_anon_vma = true;
106975
106976+ BUG_ON(vma->vm_mirror);
106977+
106978 /*
106979 * If anonymous vma has not yet been faulted, update new pgoff
106980 * to match new location, to increase its chance of merging.
106981@@ -2966,6 +3481,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
106982 return NULL;
106983 }
106984
106985+#ifdef CONFIG_PAX_SEGMEXEC
106986+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
106987+{
106988+ struct vm_area_struct *prev_m;
106989+ struct rb_node **rb_link_m, *rb_parent_m;
106990+ struct mempolicy *pol_m;
106991+
106992+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
106993+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
106994+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
106995+ *vma_m = *vma;
106996+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
106997+ if (anon_vma_clone(vma_m, vma))
106998+ return -ENOMEM;
106999+ pol_m = vma_policy(vma_m);
107000+ mpol_get(pol_m);
107001+ set_vma_policy(vma_m, pol_m);
107002+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
107003+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
107004+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
107005+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
107006+ if (vma_m->vm_file)
107007+ get_file(vma_m->vm_file);
107008+ if (vma_m->vm_ops && vma_m->vm_ops->open)
107009+ vma_m->vm_ops->open(vma_m);
107010+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
107011+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
107012+ vma_m->vm_mirror = vma;
107013+ vma->vm_mirror = vma_m;
107014+ return 0;
107015+}
107016+#endif
107017+
107018 /*
107019 * Return true if the calling process may expand its vm space by the passed
107020 * number of pages
107021@@ -2977,6 +3525,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
107022
107023 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
107024
107025+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
107026 if (cur + npages > lim)
107027 return 0;
107028 return 1;
107029@@ -3059,6 +3608,22 @@ static struct vm_area_struct *__install_special_mapping(
107030 vma->vm_start = addr;
107031 vma->vm_end = addr + len;
107032
107033+#ifdef CONFIG_PAX_MPROTECT
107034+ if (mm->pax_flags & MF_PAX_MPROTECT) {
107035+#ifndef CONFIG_PAX_MPROTECT_COMPAT
107036+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
107037+ return ERR_PTR(-EPERM);
107038+ if (!(vm_flags & VM_EXEC))
107039+ vm_flags &= ~VM_MAYEXEC;
107040+#else
107041+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
107042+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
107043+#endif
107044+ else
107045+ vm_flags &= ~VM_MAYWRITE;
107046+ }
107047+#endif
107048+
107049 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
107050 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
107051
107052diff --git a/mm/mprotect.c b/mm/mprotect.c
107053index 8858483..72f2464 100644
107054--- a/mm/mprotect.c
107055+++ b/mm/mprotect.c
107056@@ -24,10 +24,18 @@
107057 #include <linux/migrate.h>
107058 #include <linux/perf_event.h>
107059 #include <linux/ksm.h>
107060+#include <linux/sched/sysctl.h>
107061+
107062+#ifdef CONFIG_PAX_MPROTECT
107063+#include <linux/elf.h>
107064+#include <linux/binfmts.h>
107065+#endif
107066+
107067 #include <asm/uaccess.h>
107068 #include <asm/pgtable.h>
107069 #include <asm/cacheflush.h>
107070 #include <asm/tlbflush.h>
107071+#include <asm/mmu_context.h>
107072
107073 /*
107074 * For a prot_numa update we only hold mmap_sem for read so there is a
107075@@ -252,6 +260,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
107076 return pages;
107077 }
107078
107079+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
107080+/* called while holding the mmap semaphor for writing except stack expansion */
107081+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
107082+{
107083+ unsigned long oldlimit, newlimit = 0UL;
107084+
107085+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
107086+ return;
107087+
107088+ spin_lock(&mm->page_table_lock);
107089+ oldlimit = mm->context.user_cs_limit;
107090+ if ((prot & VM_EXEC) && oldlimit < end)
107091+ /* USER_CS limit moved up */
107092+ newlimit = end;
107093+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
107094+ /* USER_CS limit moved down */
107095+ newlimit = start;
107096+
107097+ if (newlimit) {
107098+ mm->context.user_cs_limit = newlimit;
107099+
107100+#ifdef CONFIG_SMP
107101+ wmb();
107102+ cpumask_clear(&mm->context.cpu_user_cs_mask);
107103+ cpumask_set_cpu(smp_processor_id(), &mm->context.cpu_user_cs_mask);
107104+#endif
107105+
107106+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
107107+ }
107108+ spin_unlock(&mm->page_table_lock);
107109+ if (newlimit == end) {
107110+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
107111+
107112+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
107113+ if (is_vm_hugetlb_page(vma))
107114+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
107115+ else
107116+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
107117+ }
107118+}
107119+#endif
107120+
107121 int
107122 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
107123 unsigned long start, unsigned long end, unsigned long newflags)
107124@@ -264,11 +314,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
107125 int error;
107126 int dirty_accountable = 0;
107127
107128+#ifdef CONFIG_PAX_SEGMEXEC
107129+ struct vm_area_struct *vma_m = NULL;
107130+ unsigned long start_m, end_m;
107131+
107132+ start_m = start + SEGMEXEC_TASK_SIZE;
107133+ end_m = end + SEGMEXEC_TASK_SIZE;
107134+#endif
107135+
107136 if (newflags == oldflags) {
107137 *pprev = vma;
107138 return 0;
107139 }
107140
107141+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
107142+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
107143+
107144+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
107145+ return -ENOMEM;
107146+
107147+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
107148+ return -ENOMEM;
107149+ }
107150+
107151 /*
107152 * If we make a private mapping writable we increase our commit;
107153 * but (without finer accounting) cannot reduce our commit if we
107154@@ -285,6 +353,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
107155 }
107156 }
107157
107158+#ifdef CONFIG_PAX_SEGMEXEC
107159+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
107160+ if (start != vma->vm_start) {
107161+ error = split_vma(mm, vma, start, 1);
107162+ if (error)
107163+ goto fail;
107164+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
107165+ *pprev = (*pprev)->vm_next;
107166+ }
107167+
107168+ if (end != vma->vm_end) {
107169+ error = split_vma(mm, vma, end, 0);
107170+ if (error)
107171+ goto fail;
107172+ }
107173+
107174+ if (pax_find_mirror_vma(vma)) {
107175+ error = __do_munmap(mm, start_m, end_m - start_m);
107176+ if (error)
107177+ goto fail;
107178+ } else {
107179+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
107180+ if (!vma_m) {
107181+ error = -ENOMEM;
107182+ goto fail;
107183+ }
107184+ vma->vm_flags = newflags;
107185+ error = pax_mirror_vma(vma_m, vma);
107186+ if (error) {
107187+ vma->vm_flags = oldflags;
107188+ goto fail;
107189+ }
107190+ }
107191+ }
107192+#endif
107193+
107194 /*
107195 * First try to merge with previous and/or next vma.
107196 */
107197@@ -315,7 +419,19 @@ success:
107198 * vm_flags and vm_page_prot are protected by the mmap_sem
107199 * held in write mode.
107200 */
107201+
107202+#ifdef CONFIG_PAX_SEGMEXEC
107203+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
107204+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
107205+#endif
107206+
107207 vma->vm_flags = newflags;
107208+
107209+#ifdef CONFIG_PAX_MPROTECT
107210+ if (mm->binfmt && mm->binfmt->handle_mprotect)
107211+ mm->binfmt->handle_mprotect(vma, newflags);
107212+#endif
107213+
107214 dirty_accountable = vma_wants_writenotify(vma);
107215 vma_set_page_prot(vma);
107216
107217@@ -351,6 +467,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
107218 end = start + len;
107219 if (end <= start)
107220 return -ENOMEM;
107221+
107222+#ifdef CONFIG_PAX_SEGMEXEC
107223+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
107224+ if (end > SEGMEXEC_TASK_SIZE)
107225+ return -EINVAL;
107226+ } else
107227+#endif
107228+
107229+ if (end > TASK_SIZE)
107230+ return -EINVAL;
107231+
107232 if (!arch_validate_prot(prot))
107233 return -EINVAL;
107234
107235@@ -358,7 +485,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
107236 /*
107237 * Does the application expect PROT_READ to imply PROT_EXEC:
107238 */
107239- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
107240+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
107241 prot |= PROT_EXEC;
107242
107243 vm_flags = calc_vm_prot_bits(prot);
107244@@ -390,6 +517,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
107245 if (start > vma->vm_start)
107246 prev = vma;
107247
107248+#ifdef CONFIG_PAX_MPROTECT
107249+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
107250+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
107251+#endif
107252+
107253 for (nstart = start ; ; ) {
107254 unsigned long newflags;
107255
107256@@ -400,6 +532,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
107257
107258 /* newflags >> 4 shift VM_MAY% in place of VM_% */
107259 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
107260+ if (prot & (PROT_WRITE | PROT_EXEC))
107261+ gr_log_rwxmprotect(vma);
107262+
107263+ error = -EACCES;
107264+ goto out;
107265+ }
107266+
107267+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
107268 error = -EACCES;
107269 goto out;
107270 }
107271@@ -414,6 +554,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
107272 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
107273 if (error)
107274 goto out;
107275+
107276+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
107277+
107278 nstart = tmp;
107279
107280 if (nstart < prev->vm_end)
107281diff --git a/mm/mremap.c b/mm/mremap.c
107282index 2dc44b1..caa1819 100644
107283--- a/mm/mremap.c
107284+++ b/mm/mremap.c
107285@@ -142,6 +142,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
107286 continue;
107287 pte = ptep_get_and_clear(mm, old_addr, old_pte);
107288 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
107289+
107290+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
107291+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
107292+ pte = pte_exprotect(pte);
107293+#endif
107294+
107295 pte = move_soft_dirty_pte(pte);
107296 set_pte_at(mm, new_addr, new_pte, pte);
107297 }
107298@@ -350,6 +356,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
107299 if (is_vm_hugetlb_page(vma))
107300 goto Einval;
107301
107302+#ifdef CONFIG_PAX_SEGMEXEC
107303+ if (pax_find_mirror_vma(vma))
107304+ goto Einval;
107305+#endif
107306+
107307 /* We can't remap across vm area boundaries */
107308 if (old_len > vma->vm_end - addr)
107309 goto Efault;
107310@@ -405,20 +416,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
107311 unsigned long ret = -EINVAL;
107312 unsigned long charged = 0;
107313 unsigned long map_flags;
107314+ unsigned long pax_task_size = TASK_SIZE;
107315
107316 if (new_addr & ~PAGE_MASK)
107317 goto out;
107318
107319- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
107320+#ifdef CONFIG_PAX_SEGMEXEC
107321+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
107322+ pax_task_size = SEGMEXEC_TASK_SIZE;
107323+#endif
107324+
107325+ pax_task_size -= PAGE_SIZE;
107326+
107327+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
107328 goto out;
107329
107330 /* Check if the location we're moving into overlaps the
107331 * old location at all, and fail if it does.
107332 */
107333- if ((new_addr <= addr) && (new_addr+new_len) > addr)
107334- goto out;
107335-
107336- if ((addr <= new_addr) && (addr+old_len) > new_addr)
107337+ if (addr + old_len > new_addr && new_addr + new_len > addr)
107338 goto out;
107339
107340 ret = do_munmap(mm, new_addr, new_len);
107341@@ -487,6 +503,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
107342 unsigned long ret = -EINVAL;
107343 unsigned long charged = 0;
107344 bool locked = false;
107345+ unsigned long pax_task_size = TASK_SIZE;
107346
107347 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
107348 return ret;
107349@@ -508,6 +525,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
107350 if (!new_len)
107351 return ret;
107352
107353+#ifdef CONFIG_PAX_SEGMEXEC
107354+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
107355+ pax_task_size = SEGMEXEC_TASK_SIZE;
107356+#endif
107357+
107358+ pax_task_size -= PAGE_SIZE;
107359+
107360+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
107361+ old_len > pax_task_size || addr > pax_task_size-old_len)
107362+ return ret;
107363+
107364 down_write(&current->mm->mmap_sem);
107365
107366 if (flags & MREMAP_FIXED) {
107367@@ -558,6 +586,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
107368 new_addr = addr;
107369 }
107370 ret = addr;
107371+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
107372 goto out;
107373 }
107374 }
107375@@ -581,7 +610,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
107376 goto out;
107377 }
107378
107379+ map_flags = vma->vm_flags;
107380 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
107381+ if (!(ret & ~PAGE_MASK)) {
107382+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
107383+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
107384+ }
107385 }
107386 out:
107387 if (ret & ~PAGE_MASK)
107388diff --git a/mm/nommu.c b/mm/nommu.c
107389index 3fba2dc9..fdad748 100644
107390--- a/mm/nommu.c
107391+++ b/mm/nommu.c
107392@@ -72,7 +72,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
107393 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
107394 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
107395 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
107396-int heap_stack_gap = 0;
107397
107398 atomic_long_t mmap_pages_allocated;
107399
107400@@ -892,15 +891,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
107401 EXPORT_SYMBOL(find_vma);
107402
107403 /*
107404- * find a VMA
107405- * - we don't extend stack VMAs under NOMMU conditions
107406- */
107407-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
107408-{
107409- return find_vma(mm, addr);
107410-}
107411-
107412-/*
107413 * expand a stack to a given address
107414 * - not supported under NOMMU conditions
107415 */
107416@@ -1585,6 +1575,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
107417
107418 /* most fields are the same, copy all, and then fixup */
107419 *new = *vma;
107420+ INIT_LIST_HEAD(&new->anon_vma_chain);
107421 *region = *vma->vm_region;
107422 new->vm_region = region;
107423
107424@@ -2007,8 +1998,8 @@ void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
107425 }
107426 EXPORT_SYMBOL(filemap_map_pages);
107427
107428-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
107429- unsigned long addr, void *buf, int len, int write)
107430+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
107431+ unsigned long addr, void *buf, size_t len, int write)
107432 {
107433 struct vm_area_struct *vma;
107434
107435@@ -2049,8 +2040,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
107436 *
107437 * The caller must hold a reference on @mm.
107438 */
107439-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
107440- void *buf, int len, int write)
107441+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
107442+ void *buf, size_t len, int write)
107443 {
107444 return __access_remote_vm(NULL, mm, addr, buf, len, write);
107445 }
107446@@ -2059,7 +2050,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
107447 * Access another process' address space.
107448 * - source/target buffer must be kernel space
107449 */
107450-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
107451+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
107452 {
107453 struct mm_struct *mm;
107454
107455diff --git a/mm/page-writeback.c b/mm/page-writeback.c
107456index ad05f2f..cee723a 100644
107457--- a/mm/page-writeback.c
107458+++ b/mm/page-writeback.c
107459@@ -664,7 +664,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
107460 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
107461 * - the bdi dirty thresh drops quickly due to change of JBOD workload
107462 */
107463-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
107464+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
107465 unsigned long thresh,
107466 unsigned long bg_thresh,
107467 unsigned long dirty,
107468diff --git a/mm/page_alloc.c b/mm/page_alloc.c
107469index 40e2942..0eb29a2 100644
107470--- a/mm/page_alloc.c
107471+++ b/mm/page_alloc.c
107472@@ -61,6 +61,7 @@
107473 #include <linux/hugetlb.h>
107474 #include <linux/sched/rt.h>
107475 #include <linux/page_owner.h>
107476+#include <linux/random.h>
107477
107478 #include <asm/sections.h>
107479 #include <asm/tlbflush.h>
107480@@ -357,7 +358,7 @@ out:
107481 * This usage means that zero-order pages may not be compound.
107482 */
107483
107484-static void free_compound_page(struct page *page)
107485+void free_compound_page(struct page *page)
107486 {
107487 __free_pages_ok(page, compound_order(page));
107488 }
107489@@ -480,7 +481,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
107490 __mod_zone_freepage_state(zone, (1 << order), migratetype);
107491 }
107492 #else
107493-struct page_ext_operations debug_guardpage_ops = { NULL, };
107494+struct page_ext_operations debug_guardpage_ops = { .need = NULL, .init = NULL };
107495 static inline void set_page_guard(struct zone *zone, struct page *page,
107496 unsigned int order, int migratetype) {}
107497 static inline void clear_page_guard(struct zone *zone, struct page *page,
107498@@ -783,6 +784,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
107499 bool compound = PageCompound(page);
107500 int i, bad = 0;
107501
107502+#ifdef CONFIG_PAX_MEMORY_SANITIZE
107503+ unsigned long index = 1UL << order;
107504+#endif
107505+
107506 VM_BUG_ON_PAGE(PageTail(page), page);
107507 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
107508
107509@@ -809,6 +814,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
107510 debug_check_no_obj_freed(page_address(page),
107511 PAGE_SIZE << order);
107512 }
107513+
107514+#ifdef CONFIG_PAX_MEMORY_SANITIZE
107515+ for (; index; --index)
107516+ sanitize_highpage(page + index - 1);
107517+#endif
107518+
107519 arch_free_page(page, order);
107520 kernel_map_pages(page, 1 << order, 0);
107521
107522@@ -832,6 +843,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
107523 local_irq_restore(flags);
107524 }
107525
107526+#ifdef CONFIG_PAX_LATENT_ENTROPY
107527+bool __meminitdata extra_latent_entropy;
107528+
107529+static int __init setup_pax_extra_latent_entropy(char *str)
107530+{
107531+ extra_latent_entropy = true;
107532+ return 0;
107533+}
107534+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
107535+
107536+volatile u64 latent_entropy __latent_entropy;
107537+EXPORT_SYMBOL(latent_entropy);
107538+#endif
107539+
107540 void __init __free_pages_bootmem(struct page *page, unsigned int order)
107541 {
107542 unsigned int nr_pages = 1 << order;
107543@@ -847,6 +872,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
107544 __ClearPageReserved(p);
107545 set_page_count(p, 0);
107546
107547+#ifdef CONFIG_PAX_LATENT_ENTROPY
107548+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
107549+ u64 hash = 0;
107550+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
107551+ const u64 *data = lowmem_page_address(page);
107552+
107553+ for (index = 0; index < end; index++)
107554+ hash ^= hash + data[index];
107555+ latent_entropy ^= hash;
107556+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
107557+ }
107558+#endif
107559+
107560 page_zone(page)->managed_pages += nr_pages;
107561 set_page_refcounted(page);
107562 __free_pages(page, order);
107563@@ -974,8 +1012,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
107564 kernel_map_pages(page, 1 << order, 1);
107565 kasan_alloc_pages(page, order);
107566
107567+#ifndef CONFIG_PAX_MEMORY_SANITIZE
107568 if (gfp_flags & __GFP_ZERO)
107569 prep_zero_page(page, order, gfp_flags);
107570+#endif
107571
107572 if (order && (gfp_flags & __GFP_COMP))
107573 prep_compound_page(page, order);
107574@@ -1699,7 +1739,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
107575 }
107576
107577 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
107578- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
107579+ if (atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
107580 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
107581 set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
107582
107583@@ -2018,7 +2058,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
107584 do {
107585 mod_zone_page_state(zone, NR_ALLOC_BATCH,
107586 high_wmark_pages(zone) - low_wmark_pages(zone) -
107587- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
107588+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
107589 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
107590 } while (zone++ != preferred_zone);
107591 }
107592@@ -5738,7 +5778,7 @@ static void __setup_per_zone_wmarks(void)
107593
107594 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
107595 high_wmark_pages(zone) - low_wmark_pages(zone) -
107596- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
107597+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
107598
107599 setup_zone_migrate_reserve(zone);
107600 spin_unlock_irqrestore(&zone->lock, flags);
107601diff --git a/mm/percpu.c b/mm/percpu.c
107602index 73c97a5..508ee25 100644
107603--- a/mm/percpu.c
107604+++ b/mm/percpu.c
107605@@ -131,7 +131,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
107606 static unsigned int pcpu_high_unit_cpu __read_mostly;
107607
107608 /* the address of the first chunk which starts with the kernel static area */
107609-void *pcpu_base_addr __read_mostly;
107610+void *pcpu_base_addr __read_only;
107611 EXPORT_SYMBOL_GPL(pcpu_base_addr);
107612
107613 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
107614diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
107615index b159769..d07037f 100644
107616--- a/mm/process_vm_access.c
107617+++ b/mm/process_vm_access.c
107618@@ -13,6 +13,7 @@
107619 #include <linux/uio.h>
107620 #include <linux/sched.h>
107621 #include <linux/highmem.h>
107622+#include <linux/security.h>
107623 #include <linux/ptrace.h>
107624 #include <linux/slab.h>
107625 #include <linux/syscalls.h>
107626@@ -154,19 +155,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
107627 ssize_t iov_len;
107628 size_t total_len = iov_iter_count(iter);
107629
107630+ return -ENOSYS; // PaX: until properly audited
107631+
107632 /*
107633 * Work out how many pages of struct pages we're going to need
107634 * when eventually calling get_user_pages
107635 */
107636 for (i = 0; i < riovcnt; i++) {
107637 iov_len = rvec[i].iov_len;
107638- if (iov_len > 0) {
107639- nr_pages_iov = ((unsigned long)rvec[i].iov_base
107640- + iov_len)
107641- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
107642- / PAGE_SIZE + 1;
107643- nr_pages = max(nr_pages, nr_pages_iov);
107644- }
107645+ if (iov_len <= 0)
107646+ continue;
107647+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
107648+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
107649+ nr_pages = max(nr_pages, nr_pages_iov);
107650 }
107651
107652 if (nr_pages == 0)
107653@@ -194,6 +195,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
107654 goto free_proc_pages;
107655 }
107656
107657+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
107658+ rc = -EPERM;
107659+ goto put_task_struct;
107660+ }
107661+
107662 mm = mm_access(task, PTRACE_MODE_ATTACH);
107663 if (!mm || IS_ERR(mm)) {
107664 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
107665diff --git a/mm/rmap.c b/mm/rmap.c
107666index c161a14..8a069bb 100644
107667--- a/mm/rmap.c
107668+++ b/mm/rmap.c
107669@@ -166,6 +166,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
107670 struct anon_vma *anon_vma = vma->anon_vma;
107671 struct anon_vma_chain *avc;
107672
107673+#ifdef CONFIG_PAX_SEGMEXEC
107674+ struct anon_vma_chain *avc_m = NULL;
107675+#endif
107676+
107677 might_sleep();
107678 if (unlikely(!anon_vma)) {
107679 struct mm_struct *mm = vma->vm_mm;
107680@@ -175,6 +179,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
107681 if (!avc)
107682 goto out_enomem;
107683
107684+#ifdef CONFIG_PAX_SEGMEXEC
107685+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
107686+ if (!avc_m)
107687+ goto out_enomem_free_avc;
107688+#endif
107689+
107690 anon_vma = find_mergeable_anon_vma(vma);
107691 allocated = NULL;
107692 if (!anon_vma) {
107693@@ -188,6 +198,19 @@ int anon_vma_prepare(struct vm_area_struct *vma)
107694 /* page_table_lock to protect against threads */
107695 spin_lock(&mm->page_table_lock);
107696 if (likely(!vma->anon_vma)) {
107697+
107698+#ifdef CONFIG_PAX_SEGMEXEC
107699+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
107700+
107701+ if (vma_m) {
107702+ BUG_ON(vma_m->anon_vma);
107703+ vma_m->anon_vma = anon_vma;
107704+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
107705+ anon_vma->degree++;
107706+ avc_m = NULL;
107707+ }
107708+#endif
107709+
107710 vma->anon_vma = anon_vma;
107711 anon_vma_chain_link(vma, avc, anon_vma);
107712 /* vma reference or self-parent link for new root */
107713@@ -200,12 +223,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
107714
107715 if (unlikely(allocated))
107716 put_anon_vma(allocated);
107717+
107718+#ifdef CONFIG_PAX_SEGMEXEC
107719+ if (unlikely(avc_m))
107720+ anon_vma_chain_free(avc_m);
107721+#endif
107722+
107723 if (unlikely(avc))
107724 anon_vma_chain_free(avc);
107725 }
107726 return 0;
107727
107728 out_enomem_free_avc:
107729+
107730+#ifdef CONFIG_PAX_SEGMEXEC
107731+ if (avc_m)
107732+ anon_vma_chain_free(avc_m);
107733+#endif
107734+
107735 anon_vma_chain_free(avc);
107736 out_enomem:
107737 return -ENOMEM;
107738@@ -249,7 +284,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
107739 * good chance of avoiding scanning the whole hierarchy when it searches where
107740 * page is mapped.
107741 */
107742-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
107743+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
107744 {
107745 struct anon_vma_chain *avc, *pavc;
107746 struct anon_vma *root = NULL;
107747@@ -303,7 +338,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
107748 * the corresponding VMA in the parent process is attached to.
107749 * Returns 0 on success, non-zero on failure.
107750 */
107751-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
107752+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
107753 {
107754 struct anon_vma_chain *avc;
107755 struct anon_vma *anon_vma;
107756@@ -423,8 +458,10 @@ static void anon_vma_ctor(void *data)
107757 void __init anon_vma_init(void)
107758 {
107759 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
107760- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
107761- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
107762+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
107763+ anon_vma_ctor);
107764+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
107765+ SLAB_PANIC|SLAB_NO_SANITIZE);
107766 }
107767
107768 /*
107769diff --git a/mm/shmem.c b/mm/shmem.c
107770index cf2d0ca..ec06b8b 100644
107771--- a/mm/shmem.c
107772+++ b/mm/shmem.c
107773@@ -33,7 +33,7 @@
107774 #include <linux/swap.h>
107775 #include <linux/aio.h>
107776
107777-static struct vfsmount *shm_mnt;
107778+struct vfsmount *shm_mnt;
107779
107780 #ifdef CONFIG_SHMEM
107781 /*
107782@@ -80,7 +80,7 @@ static struct vfsmount *shm_mnt;
107783 #define BOGO_DIRENT_SIZE 20
107784
107785 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
107786-#define SHORT_SYMLINK_LEN 128
107787+#define SHORT_SYMLINK_LEN 64
107788
107789 /*
107790 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
107791@@ -2555,6 +2555,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
107792 static int shmem_xattr_validate(const char *name)
107793 {
107794 struct { const char *prefix; size_t len; } arr[] = {
107795+
107796+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
107797+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
107798+#endif
107799+
107800 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
107801 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
107802 };
107803@@ -2610,6 +2615,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
107804 if (err)
107805 return err;
107806
107807+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
107808+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
107809+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
107810+ return -EOPNOTSUPP;
107811+ if (size > 8)
107812+ return -EINVAL;
107813+ }
107814+#endif
107815+
107816 return simple_xattr_set(&info->xattrs, name, value, size, flags);
107817 }
107818
107819@@ -2993,8 +3007,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
107820 int err = -ENOMEM;
107821
107822 /* Round up to L1_CACHE_BYTES to resist false sharing */
107823- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
107824- L1_CACHE_BYTES), GFP_KERNEL);
107825+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
107826 if (!sbinfo)
107827 return -ENOMEM;
107828
107829diff --git a/mm/slab.c b/mm/slab.c
107830index c4b89ea..20990be 100644
107831--- a/mm/slab.c
107832+++ b/mm/slab.c
107833@@ -314,10 +314,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
107834 if ((x)->max_freeable < i) \
107835 (x)->max_freeable = i; \
107836 } while (0)
107837-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
107838-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
107839-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
107840-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
107841+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
107842+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
107843+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
107844+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
107845+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
107846+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
107847 #else
107848 #define STATS_INC_ACTIVE(x) do { } while (0)
107849 #define STATS_DEC_ACTIVE(x) do { } while (0)
107850@@ -334,6 +336,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
107851 #define STATS_INC_ALLOCMISS(x) do { } while (0)
107852 #define STATS_INC_FREEHIT(x) do { } while (0)
107853 #define STATS_INC_FREEMISS(x) do { } while (0)
107854+#define STATS_INC_SANITIZED(x) do { } while (0)
107855+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
107856 #endif
107857
107858 #if DEBUG
107859@@ -450,7 +454,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
107860 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
107861 */
107862 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
107863- const struct page *page, void *obj)
107864+ const struct page *page, const void *obj)
107865 {
107866 u32 offset = (obj - page->s_mem);
107867 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
107868@@ -1438,7 +1442,7 @@ void __init kmem_cache_init(void)
107869 * structures first. Without this, further allocations will bug.
107870 */
107871 kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
107872- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
107873+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
107874 slab_state = PARTIAL_NODE;
107875
107876 slab_early_init = 0;
107877@@ -2059,7 +2063,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
107878
107879 cachep = find_mergeable(size, align, flags, name, ctor);
107880 if (cachep) {
107881- cachep->refcount++;
107882+ atomic_inc(&cachep->refcount);
107883
107884 /*
107885 * Adjust the object sizes so that we clear
107886@@ -3357,6 +3361,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
107887 struct array_cache *ac = cpu_cache_get(cachep);
107888
107889 check_irq_off();
107890+
107891+#ifdef CONFIG_PAX_MEMORY_SANITIZE
107892+ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
107893+ STATS_INC_NOT_SANITIZED(cachep);
107894+ else {
107895+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
107896+
107897+ if (cachep->ctor)
107898+ cachep->ctor(objp);
107899+
107900+ STATS_INC_SANITIZED(cachep);
107901+ }
107902+#endif
107903+
107904 kmemleak_free_recursive(objp, cachep->flags);
107905 objp = cache_free_debugcheck(cachep, objp, caller);
107906
107907@@ -3469,7 +3487,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
107908 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
107909 }
107910
107911-void *__kmalloc_node(size_t size, gfp_t flags, int node)
107912+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
107913 {
107914 return __do_kmalloc_node(size, flags, node, _RET_IP_);
107915 }
107916@@ -3489,7 +3507,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
107917 * @flags: the type of memory to allocate (see kmalloc).
107918 * @caller: function caller for debug tracking of the caller
107919 */
107920-static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
107921+static __always_inline void * __size_overflow(1) __do_kmalloc(size_t size, gfp_t flags,
107922 unsigned long caller)
107923 {
107924 struct kmem_cache *cachep;
107925@@ -3562,6 +3580,7 @@ void kfree(const void *objp)
107926
107927 if (unlikely(ZERO_OR_NULL_PTR(objp)))
107928 return;
107929+ VM_BUG_ON(!virt_addr_valid(objp));
107930 local_irq_save(flags);
107931 kfree_debugcheck(objp);
107932 c = virt_to_cache(objp);
107933@@ -3981,14 +4000,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
107934 }
107935 /* cpu stats */
107936 {
107937- unsigned long allochit = atomic_read(&cachep->allochit);
107938- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
107939- unsigned long freehit = atomic_read(&cachep->freehit);
107940- unsigned long freemiss = atomic_read(&cachep->freemiss);
107941+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
107942+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
107943+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
107944+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
107945
107946 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
107947 allochit, allocmiss, freehit, freemiss);
107948 }
107949+#ifdef CONFIG_PAX_MEMORY_SANITIZE
107950+ {
107951+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
107952+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
107953+
107954+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
107955+ }
107956+#endif
107957 #endif
107958 }
107959
107960@@ -4196,13 +4223,69 @@ static const struct file_operations proc_slabstats_operations = {
107961 static int __init slab_proc_init(void)
107962 {
107963 #ifdef CONFIG_DEBUG_SLAB_LEAK
107964- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
107965+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
107966 #endif
107967 return 0;
107968 }
107969 module_init(slab_proc_init);
107970 #endif
107971
107972+bool is_usercopy_object(const void *ptr)
107973+{
107974+ struct page *page;
107975+ struct kmem_cache *cachep;
107976+
107977+ if (ZERO_OR_NULL_PTR(ptr))
107978+ return false;
107979+
107980+ if (!slab_is_available())
107981+ return false;
107982+
107983+ if (!virt_addr_valid(ptr))
107984+ return false;
107985+
107986+ page = virt_to_head_page(ptr);
107987+
107988+ if (!PageSlab(page))
107989+ return false;
107990+
107991+ cachep = page->slab_cache;
107992+ return cachep->flags & SLAB_USERCOPY;
107993+}
107994+
107995+#ifdef CONFIG_PAX_USERCOPY
107996+const char *check_heap_object(const void *ptr, unsigned long n)
107997+{
107998+ struct page *page;
107999+ struct kmem_cache *cachep;
108000+ unsigned int objnr;
108001+ unsigned long offset;
108002+
108003+ if (ZERO_OR_NULL_PTR(ptr))
108004+ return "<null>";
108005+
108006+ if (!virt_addr_valid(ptr))
108007+ return NULL;
108008+
108009+ page = virt_to_head_page(ptr);
108010+
108011+ if (!PageSlab(page))
108012+ return NULL;
108013+
108014+ cachep = page->slab_cache;
108015+ if (!(cachep->flags & SLAB_USERCOPY))
108016+ return cachep->name;
108017+
108018+ objnr = obj_to_index(cachep, page, ptr);
108019+ BUG_ON(objnr >= cachep->num);
108020+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
108021+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
108022+ return NULL;
108023+
108024+ return cachep->name;
108025+}
108026+#endif
108027+
108028 /**
108029 * ksize - get the actual amount of memory allocated for a given object
108030 * @objp: Pointer to the object
108031diff --git a/mm/slab.h b/mm/slab.h
108032index 4c3ac12..7b2e470 100644
108033--- a/mm/slab.h
108034+++ b/mm/slab.h
108035@@ -22,7 +22,7 @@ struct kmem_cache {
108036 unsigned int align; /* Alignment as calculated */
108037 unsigned long flags; /* Active flags on the slab */
108038 const char *name; /* Slab name for sysfs */
108039- int refcount; /* Use counter */
108040+ atomic_t refcount; /* Use counter */
108041 void (*ctor)(void *); /* Called on object slot creation */
108042 struct list_head list; /* List of all slab caches on the system */
108043 };
108044@@ -66,6 +66,20 @@ extern struct list_head slab_caches;
108045 /* The slab cache that manages slab cache information */
108046 extern struct kmem_cache *kmem_cache;
108047
108048+#ifdef CONFIG_PAX_MEMORY_SANITIZE
108049+#ifdef CONFIG_X86_64
108050+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
108051+#else
108052+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
108053+#endif
108054+enum pax_sanitize_mode {
108055+ PAX_SANITIZE_SLAB_OFF = 0,
108056+ PAX_SANITIZE_SLAB_FAST,
108057+ PAX_SANITIZE_SLAB_FULL,
108058+};
108059+extern enum pax_sanitize_mode pax_sanitize_slab;
108060+#endif
108061+
108062 unsigned long calculate_alignment(unsigned long flags,
108063 unsigned long align, unsigned long size);
108064
108065@@ -114,7 +128,8 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
108066
108067 /* Legal flag mask for kmem_cache_create(), for various configurations */
108068 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
108069- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
108070+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
108071+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
108072
108073 #if defined(CONFIG_DEBUG_SLAB)
108074 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
108075@@ -315,6 +330,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
108076 return s;
108077
108078 page = virt_to_head_page(x);
108079+
108080+ BUG_ON(!PageSlab(page));
108081+
108082 cachep = page->slab_cache;
108083 if (slab_equal_or_root(cachep, s))
108084 return cachep;
108085diff --git a/mm/slab_common.c b/mm/slab_common.c
108086index 999bb34..9843aea 100644
108087--- a/mm/slab_common.c
108088+++ b/mm/slab_common.c
108089@@ -25,11 +25,35 @@
108090
108091 #include "slab.h"
108092
108093-enum slab_state slab_state;
108094+enum slab_state slab_state __read_only;
108095 LIST_HEAD(slab_caches);
108096 DEFINE_MUTEX(slab_mutex);
108097 struct kmem_cache *kmem_cache;
108098
108099+#ifdef CONFIG_PAX_MEMORY_SANITIZE
108100+enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
108101+static int __init pax_sanitize_slab_setup(char *str)
108102+{
108103+ if (!str)
108104+ return 0;
108105+
108106+ if (!strcmp(str, "0") || !strcmp(str, "off")) {
108107+ pr_info("PaX slab sanitization: %s\n", "disabled");
108108+ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
108109+ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
108110+ pr_info("PaX slab sanitization: %s\n", "fast");
108111+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
108112+ } else if (!strcmp(str, "full")) {
108113+ pr_info("PaX slab sanitization: %s\n", "full");
108114+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
108115+ } else
108116+ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
108117+
108118+ return 0;
108119+}
108120+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
108121+#endif
108122+
108123 /*
108124 * Set of flags that will prevent slab merging
108125 */
108126@@ -44,7 +68,7 @@ struct kmem_cache *kmem_cache;
108127 * Merge control. If this is set then no merging of slab caches will occur.
108128 * (Could be removed. This was introduced to pacify the merge skeptics.)
108129 */
108130-static int slab_nomerge;
108131+static int slab_nomerge = 1;
108132
108133 static int __init setup_slab_nomerge(char *str)
108134 {
108135@@ -217,7 +241,7 @@ int slab_unmergeable(struct kmem_cache *s)
108136 /*
108137 * We may have set a slab to be unmergeable during bootstrap.
108138 */
108139- if (s->refcount < 0)
108140+ if (atomic_read(&s->refcount) < 0)
108141 return 1;
108142
108143 return 0;
108144@@ -321,7 +345,7 @@ do_kmem_cache_create(const char *name, size_t object_size, size_t size,
108145 if (err)
108146 goto out_free_cache;
108147
108148- s->refcount = 1;
108149+ atomic_set(&s->refcount, 1);
108150 list_add(&s->list, &slab_caches);
108151 out:
108152 if (err)
108153@@ -386,6 +410,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
108154 */
108155 flags &= CACHE_CREATE_MASK;
108156
108157+#ifdef CONFIG_PAX_MEMORY_SANITIZE
108158+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
108159+ flags |= SLAB_NO_SANITIZE;
108160+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
108161+ flags &= ~SLAB_NO_SANITIZE;
108162+#endif
108163+
108164 s = __kmem_cache_alias(name, size, align, flags, ctor);
108165 if (s)
108166 goto out_unlock;
108167@@ -456,7 +487,7 @@ static void do_kmem_cache_release(struct list_head *release,
108168 rcu_barrier();
108169
108170 list_for_each_entry_safe(s, s2, release, list) {
108171-#ifdef SLAB_SUPPORTS_SYSFS
108172+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
108173 sysfs_slab_remove(s);
108174 #else
108175 slab_kmem_cache_release(s);
108176@@ -625,8 +656,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
108177
108178 mutex_lock(&slab_mutex);
108179
108180- s->refcount--;
108181- if (s->refcount)
108182+ if (!atomic_dec_and_test(&s->refcount))
108183 goto out_unlock;
108184
108185 for_each_memcg_cache_safe(c, c2, s) {
108186@@ -691,7 +721,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
108187 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
108188 name, size, err);
108189
108190- s->refcount = -1; /* Exempt from merging for now */
108191+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
108192 }
108193
108194 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
108195@@ -704,7 +734,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
108196
108197 create_boot_cache(s, name, size, flags);
108198 list_add(&s->list, &slab_caches);
108199- s->refcount = 1;
108200+ atomic_set(&s->refcount, 1);
108201 return s;
108202 }
108203
108204@@ -716,6 +746,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
108205 EXPORT_SYMBOL(kmalloc_dma_caches);
108206 #endif
108207
108208+#ifdef CONFIG_PAX_USERCOPY_SLABS
108209+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
108210+EXPORT_SYMBOL(kmalloc_usercopy_caches);
108211+#endif
108212+
108213 /*
108214 * Conversion table for small slabs sizes / 8 to the index in the
108215 * kmalloc array. This is necessary for slabs < 192 since we have non power
108216@@ -780,6 +815,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
108217 return kmalloc_dma_caches[index];
108218
108219 #endif
108220+
108221+#ifdef CONFIG_PAX_USERCOPY_SLABS
108222+ if (unlikely((flags & GFP_USERCOPY)))
108223+ return kmalloc_usercopy_caches[index];
108224+
108225+#endif
108226+
108227 return kmalloc_caches[index];
108228 }
108229
108230@@ -836,7 +878,7 @@ void __init create_kmalloc_caches(unsigned long flags)
108231 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
108232 if (!kmalloc_caches[i]) {
108233 kmalloc_caches[i] = create_kmalloc_cache(NULL,
108234- 1 << i, flags);
108235+ 1 << i, SLAB_USERCOPY | flags);
108236 }
108237
108238 /*
108239@@ -845,10 +887,10 @@ void __init create_kmalloc_caches(unsigned long flags)
108240 * earlier power of two caches
108241 */
108242 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
108243- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
108244+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
108245
108246 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
108247- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
108248+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
108249 }
108250
108251 /* Kmalloc array is now usable */
108252@@ -881,6 +923,23 @@ void __init create_kmalloc_caches(unsigned long flags)
108253 }
108254 }
108255 #endif
108256+
108257+#ifdef CONFIG_PAX_USERCOPY_SLABS
108258+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
108259+ struct kmem_cache *s = kmalloc_caches[i];
108260+
108261+ if (s) {
108262+ int size = kmalloc_size(i);
108263+ char *n = kasprintf(GFP_NOWAIT,
108264+ "usercopy-kmalloc-%d", size);
108265+
108266+ BUG_ON(!n);
108267+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
108268+ size, SLAB_USERCOPY | flags);
108269+ }
108270+ }
108271+#endif
108272+
108273 }
108274 #endif /* !CONFIG_SLOB */
108275
108276@@ -940,6 +999,9 @@ static void print_slabinfo_header(struct seq_file *m)
108277 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
108278 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
108279 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
108280+#ifdef CONFIG_PAX_MEMORY_SANITIZE
108281+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
108282+#endif
108283 #endif
108284 seq_putc(m, '\n');
108285 }
108286@@ -1069,7 +1131,7 @@ static int __init slab_proc_init(void)
108287 module_init(slab_proc_init);
108288 #endif /* CONFIG_SLABINFO */
108289
108290-static __always_inline void *__do_krealloc(const void *p, size_t new_size,
108291+static __always_inline void * __size_overflow(2) __do_krealloc(const void *p, size_t new_size,
108292 gfp_t flags)
108293 {
108294 void *ret;
108295diff --git a/mm/slob.c b/mm/slob.c
108296index 94a7fed..cf3fb1a 100644
108297--- a/mm/slob.c
108298+++ b/mm/slob.c
108299@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
108300 /*
108301 * Return the size of a slob block.
108302 */
108303-static slobidx_t slob_units(slob_t *s)
108304+static slobidx_t slob_units(const slob_t *s)
108305 {
108306 if (s->units > 0)
108307 return s->units;
108308@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
108309 /*
108310 * Return the next free slob block pointer after this one.
108311 */
108312-static slob_t *slob_next(slob_t *s)
108313+static slob_t *slob_next(const slob_t *s)
108314 {
108315 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
108316 slobidx_t next;
108317@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
108318 /*
108319 * Returns true if s is the last free block in its page.
108320 */
108321-static int slob_last(slob_t *s)
108322+static int slob_last(const slob_t *s)
108323 {
108324 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
108325 }
108326
108327-static void *slob_new_pages(gfp_t gfp, int order, int node)
108328+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
108329 {
108330- void *page;
108331+ struct page *page;
108332
108333 #ifdef CONFIG_NUMA
108334 if (node != NUMA_NO_NODE)
108335@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
108336 if (!page)
108337 return NULL;
108338
108339- return page_address(page);
108340+ __SetPageSlab(page);
108341+ return page;
108342 }
108343
108344-static void slob_free_pages(void *b, int order)
108345+static void slob_free_pages(struct page *sp, int order)
108346 {
108347 if (current->reclaim_state)
108348 current->reclaim_state->reclaimed_slab += 1 << order;
108349- free_pages((unsigned long)b, order);
108350+ __ClearPageSlab(sp);
108351+ page_mapcount_reset(sp);
108352+ sp->private = 0;
108353+ __free_pages(sp, order);
108354 }
108355
108356 /*
108357@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
108358
108359 /* Not enough space: must allocate a new page */
108360 if (!b) {
108361- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
108362- if (!b)
108363+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
108364+ if (!sp)
108365 return NULL;
108366- sp = virt_to_page(b);
108367- __SetPageSlab(sp);
108368+ b = page_address(sp);
108369
108370 spin_lock_irqsave(&slob_lock, flags);
108371 sp->units = SLOB_UNITS(PAGE_SIZE);
108372 sp->freelist = b;
108373+ sp->private = 0;
108374 INIT_LIST_HEAD(&sp->lru);
108375 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
108376 set_slob_page_free(sp, slob_list);
108377@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
108378 /*
108379 * slob_free: entry point into the slob allocator.
108380 */
108381-static void slob_free(void *block, int size)
108382+static void slob_free(struct kmem_cache *c, void *block, int size)
108383 {
108384 struct page *sp;
108385 slob_t *prev, *next, *b = (slob_t *)block;
108386@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
108387 if (slob_page_free(sp))
108388 clear_slob_page_free(sp);
108389 spin_unlock_irqrestore(&slob_lock, flags);
108390- __ClearPageSlab(sp);
108391- page_mapcount_reset(sp);
108392- slob_free_pages(b, 0);
108393+ slob_free_pages(sp, 0);
108394 return;
108395 }
108396
108397+#ifdef CONFIG_PAX_MEMORY_SANITIZE
108398+ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
108399+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
108400+#endif
108401+
108402 if (!slob_page_free(sp)) {
108403 /* This slob page is about to become partially free. Easy! */
108404 sp->units = units;
108405@@ -424,11 +431,10 @@ out:
108406 */
108407
108408 static __always_inline void *
108409-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
108410+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
108411 {
108412- unsigned int *m;
108413- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
108414- void *ret;
108415+ slob_t *m;
108416+ void *ret = NULL;
108417
108418 gfp &= gfp_allowed_mask;
108419
108420@@ -442,27 +448,45 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
108421
108422 if (!m)
108423 return NULL;
108424- *m = size;
108425+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
108426+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
108427+ m[0].units = size;
108428+ m[1].units = align;
108429 ret = (void *)m + align;
108430
108431 trace_kmalloc_node(caller, ret,
108432 size, size + align, gfp, node);
108433 } else {
108434 unsigned int order = get_order(size);
108435+ struct page *page;
108436
108437 if (likely(order))
108438 gfp |= __GFP_COMP;
108439- ret = slob_new_pages(gfp, order, node);
108440+ page = slob_new_pages(gfp, order, node);
108441+ if (page) {
108442+ ret = page_address(page);
108443+ page->private = size;
108444+ }
108445
108446 trace_kmalloc_node(caller, ret,
108447 size, PAGE_SIZE << order, gfp, node);
108448 }
108449
108450- kmemleak_alloc(ret, size, 1, gfp);
108451 return ret;
108452 }
108453
108454-void *__kmalloc(size_t size, gfp_t gfp)
108455+static __always_inline void *
108456+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
108457+{
108458+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
108459+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
108460+
108461+ if (!ZERO_OR_NULL_PTR(ret))
108462+ kmemleak_alloc(ret, size, 1, gfp);
108463+ return ret;
108464+}
108465+
108466+void * __size_overflow(1) __kmalloc(size_t size, gfp_t gfp)
108467 {
108468 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
108469 }
108470@@ -491,34 +515,112 @@ void kfree(const void *block)
108471 return;
108472 kmemleak_free(block);
108473
108474+ VM_BUG_ON(!virt_addr_valid(block));
108475 sp = virt_to_page(block);
108476- if (PageSlab(sp)) {
108477+ VM_BUG_ON(!PageSlab(sp));
108478+ if (!sp->private) {
108479 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
108480- unsigned int *m = (unsigned int *)(block - align);
108481- slob_free(m, *m + align);
108482- } else
108483+ slob_t *m = (slob_t *)(block - align);
108484+ slob_free(NULL, m, m[0].units + align);
108485+ } else {
108486+ __ClearPageSlab(sp);
108487+ page_mapcount_reset(sp);
108488+ sp->private = 0;
108489 __free_pages(sp, compound_order(sp));
108490+ }
108491 }
108492 EXPORT_SYMBOL(kfree);
108493
108494+bool is_usercopy_object(const void *ptr)
108495+{
108496+ if (!slab_is_available())
108497+ return false;
108498+
108499+ // PAX: TODO
108500+
108501+ return false;
108502+}
108503+
108504+#ifdef CONFIG_PAX_USERCOPY
108505+const char *check_heap_object(const void *ptr, unsigned long n)
108506+{
108507+ struct page *page;
108508+ const slob_t *free;
108509+ const void *base;
108510+ unsigned long flags;
108511+
108512+ if (ZERO_OR_NULL_PTR(ptr))
108513+ return "<null>";
108514+
108515+ if (!virt_addr_valid(ptr))
108516+ return NULL;
108517+
108518+ page = virt_to_head_page(ptr);
108519+ if (!PageSlab(page))
108520+ return NULL;
108521+
108522+ if (page->private) {
108523+ base = page;
108524+ if (base <= ptr && n <= page->private - (ptr - base))
108525+ return NULL;
108526+ return "<slob>";
108527+ }
108528+
108529+ /* some tricky double walking to find the chunk */
108530+ spin_lock_irqsave(&slob_lock, flags);
108531+ base = (void *)((unsigned long)ptr & PAGE_MASK);
108532+ free = page->freelist;
108533+
108534+ while (!slob_last(free) && (void *)free <= ptr) {
108535+ base = free + slob_units(free);
108536+ free = slob_next(free);
108537+ }
108538+
108539+ while (base < (void *)free) {
108540+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
108541+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
108542+ int offset;
108543+
108544+ if (ptr < base + align)
108545+ break;
108546+
108547+ offset = ptr - base - align;
108548+ if (offset >= m) {
108549+ base += size;
108550+ continue;
108551+ }
108552+
108553+ if (n > m - offset)
108554+ break;
108555+
108556+ spin_unlock_irqrestore(&slob_lock, flags);
108557+ return NULL;
108558+ }
108559+
108560+ spin_unlock_irqrestore(&slob_lock, flags);
108561+ return "<slob>";
108562+}
108563+#endif
108564+
108565 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
108566 size_t ksize(const void *block)
108567 {
108568 struct page *sp;
108569 int align;
108570- unsigned int *m;
108571+ slob_t *m;
108572
108573 BUG_ON(!block);
108574 if (unlikely(block == ZERO_SIZE_PTR))
108575 return 0;
108576
108577 sp = virt_to_page(block);
108578- if (unlikely(!PageSlab(sp)))
108579- return PAGE_SIZE << compound_order(sp);
108580+ VM_BUG_ON(!PageSlab(sp));
108581+ if (sp->private)
108582+ return sp->private;
108583
108584 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
108585- m = (unsigned int *)(block - align);
108586- return SLOB_UNITS(*m) * SLOB_UNIT;
108587+ m = (slob_t *)(block - align);
108588+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
108589 }
108590 EXPORT_SYMBOL(ksize);
108591
108592@@ -534,23 +636,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
108593
108594 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
108595 {
108596- void *b;
108597+ void *b = NULL;
108598
108599 flags &= gfp_allowed_mask;
108600
108601 lockdep_trace_alloc(flags);
108602
108603+#ifdef CONFIG_PAX_USERCOPY_SLABS
108604+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
108605+#else
108606 if (c->size < PAGE_SIZE) {
108607 b = slob_alloc(c->size, flags, c->align, node);
108608 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
108609 SLOB_UNITS(c->size) * SLOB_UNIT,
108610 flags, node);
108611 } else {
108612- b = slob_new_pages(flags, get_order(c->size), node);
108613+ struct page *sp;
108614+
108615+ sp = slob_new_pages(flags, get_order(c->size), node);
108616+ if (sp) {
108617+ b = page_address(sp);
108618+ sp->private = c->size;
108619+ }
108620 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
108621 PAGE_SIZE << get_order(c->size),
108622 flags, node);
108623 }
108624+#endif
108625
108626 if (b && c->ctor)
108627 c->ctor(b);
108628@@ -567,7 +679,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
108629 EXPORT_SYMBOL(kmem_cache_alloc);
108630
108631 #ifdef CONFIG_NUMA
108632-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
108633+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t gfp, int node)
108634 {
108635 return __do_kmalloc_node(size, gfp, node, _RET_IP_);
108636 }
108637@@ -580,12 +692,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
108638 EXPORT_SYMBOL(kmem_cache_alloc_node);
108639 #endif
108640
108641-static void __kmem_cache_free(void *b, int size)
108642+static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
108643 {
108644- if (size < PAGE_SIZE)
108645- slob_free(b, size);
108646+ struct page *sp;
108647+
108648+ sp = virt_to_page(b);
108649+ BUG_ON(!PageSlab(sp));
108650+ if (!sp->private)
108651+ slob_free(c, b, size);
108652 else
108653- slob_free_pages(b, get_order(size));
108654+ slob_free_pages(sp, get_order(size));
108655 }
108656
108657 static void kmem_rcu_free(struct rcu_head *head)
108658@@ -593,22 +709,36 @@ static void kmem_rcu_free(struct rcu_head *head)
108659 struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
108660 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
108661
108662- __kmem_cache_free(b, slob_rcu->size);
108663+ __kmem_cache_free(NULL, b, slob_rcu->size);
108664 }
108665
108666 void kmem_cache_free(struct kmem_cache *c, void *b)
108667 {
108668+ int size = c->size;
108669+
108670+#ifdef CONFIG_PAX_USERCOPY_SLABS
108671+ if (size + c->align < PAGE_SIZE) {
108672+ size += c->align;
108673+ b -= c->align;
108674+ }
108675+#endif
108676+
108677 kmemleak_free_recursive(b, c->flags);
108678 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
108679 struct slob_rcu *slob_rcu;
108680- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
108681- slob_rcu->size = c->size;
108682+ slob_rcu = b + (size - sizeof(struct slob_rcu));
108683+ slob_rcu->size = size;
108684 call_rcu(&slob_rcu->head, kmem_rcu_free);
108685 } else {
108686- __kmem_cache_free(b, c->size);
108687+ __kmem_cache_free(c, b, size);
108688 }
108689
108690+#ifdef CONFIG_PAX_USERCOPY_SLABS
108691+ trace_kfree(_RET_IP_, b);
108692+#else
108693 trace_kmem_cache_free(_RET_IP_, b);
108694+#endif
108695+
108696 }
108697 EXPORT_SYMBOL(kmem_cache_free);
108698
108699diff --git a/mm/slub.c b/mm/slub.c
108700index 82c4737..55c316a 100644
108701--- a/mm/slub.c
108702+++ b/mm/slub.c
108703@@ -198,7 +198,7 @@ struct track {
108704
108705 enum track_item { TRACK_ALLOC, TRACK_FREE };
108706
108707-#ifdef CONFIG_SYSFS
108708+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
108709 static int sysfs_slab_add(struct kmem_cache *);
108710 static int sysfs_slab_alias(struct kmem_cache *, const char *);
108711 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
108712@@ -556,7 +556,7 @@ static void print_track(const char *s, struct track *t)
108713 if (!t->addr)
108714 return;
108715
108716- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
108717+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
108718 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
108719 #ifdef CONFIG_STACKTRACE
108720 {
108721@@ -2709,6 +2709,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
108722
108723 slab_free_hook(s, x);
108724
108725+#ifdef CONFIG_PAX_MEMORY_SANITIZE
108726+ if (!(s->flags & SLAB_NO_SANITIZE)) {
108727+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
108728+ if (s->ctor)
108729+ s->ctor(x);
108730+ }
108731+#endif
108732+
108733 redo:
108734 /*
108735 * Determine the currently cpus per cpu slab.
108736@@ -3050,6 +3058,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
108737 s->inuse = size;
108738
108739 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
108740+#ifdef CONFIG_PAX_MEMORY_SANITIZE
108741+ (!(flags & SLAB_NO_SANITIZE)) ||
108742+#endif
108743 s->ctor)) {
108744 /*
108745 * Relocate free pointer after the object if it is not
108746@@ -3304,7 +3315,7 @@ static int __init setup_slub_min_objects(char *str)
108747
108748 __setup("slub_min_objects=", setup_slub_min_objects);
108749
108750-void *__kmalloc(size_t size, gfp_t flags)
108751+void * __size_overflow(1) __kmalloc(size_t size, gfp_t flags)
108752 {
108753 struct kmem_cache *s;
108754 void *ret;
108755@@ -3342,7 +3353,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
108756 return ptr;
108757 }
108758
108759-void *__kmalloc_node(size_t size, gfp_t flags, int node)
108760+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
108761 {
108762 struct kmem_cache *s;
108763 void *ret;
108764@@ -3390,6 +3401,59 @@ static size_t __ksize(const void *object)
108765 return slab_ksize(page->slab_cache);
108766 }
108767
108768+bool is_usercopy_object(const void *ptr)
108769+{
108770+ struct page *page;
108771+ struct kmem_cache *s;
108772+
108773+ if (ZERO_OR_NULL_PTR(ptr))
108774+ return false;
108775+
108776+ if (!slab_is_available())
108777+ return false;
108778+
108779+ if (!virt_addr_valid(ptr))
108780+ return false;
108781+
108782+ page = virt_to_head_page(ptr);
108783+
108784+ if (!PageSlab(page))
108785+ return false;
108786+
108787+ s = page->slab_cache;
108788+ return s->flags & SLAB_USERCOPY;
108789+}
108790+
108791+#ifdef CONFIG_PAX_USERCOPY
108792+const char *check_heap_object(const void *ptr, unsigned long n)
108793+{
108794+ struct page *page;
108795+ struct kmem_cache *s;
108796+ unsigned long offset;
108797+
108798+ if (ZERO_OR_NULL_PTR(ptr))
108799+ return "<null>";
108800+
108801+ if (!virt_addr_valid(ptr))
108802+ return NULL;
108803+
108804+ page = virt_to_head_page(ptr);
108805+
108806+ if (!PageSlab(page))
108807+ return NULL;
108808+
108809+ s = page->slab_cache;
108810+ if (!(s->flags & SLAB_USERCOPY))
108811+ return s->name;
108812+
108813+ offset = (ptr - page_address(page)) % s->size;
108814+ if (offset <= s->object_size && n <= s->object_size - offset)
108815+ return NULL;
108816+
108817+ return s->name;
108818+}
108819+#endif
108820+
108821 size_t ksize(const void *object)
108822 {
108823 size_t size = __ksize(object);
108824@@ -3410,6 +3474,7 @@ void kfree(const void *x)
108825 if (unlikely(ZERO_OR_NULL_PTR(x)))
108826 return;
108827
108828+ VM_BUG_ON(!virt_addr_valid(x));
108829 page = virt_to_head_page(x);
108830 if (unlikely(!PageSlab(page))) {
108831 BUG_ON(!PageCompound(page));
108832@@ -3726,7 +3791,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
108833
108834 s = find_mergeable(size, align, flags, name, ctor);
108835 if (s) {
108836- s->refcount++;
108837+ atomic_inc(&s->refcount);
108838
108839 /*
108840 * Adjust the object sizes so that we clear
108841@@ -3742,7 +3807,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
108842 }
108843
108844 if (sysfs_slab_alias(s, name)) {
108845- s->refcount--;
108846+ atomic_dec(&s->refcount);
108847 s = NULL;
108848 }
108849 }
108850@@ -3859,7 +3924,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
108851 }
108852 #endif
108853
108854-#ifdef CONFIG_SYSFS
108855+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
108856 static int count_inuse(struct page *page)
108857 {
108858 return page->inuse;
108859@@ -4140,7 +4205,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
108860 len += sprintf(buf + len, "%7ld ", l->count);
108861
108862 if (l->addr)
108863+#ifdef CONFIG_GRKERNSEC_HIDESYM
108864+ len += sprintf(buf + len, "%pS", NULL);
108865+#else
108866 len += sprintf(buf + len, "%pS", (void *)l->addr);
108867+#endif
108868 else
108869 len += sprintf(buf + len, "<not-available>");
108870
108871@@ -4238,12 +4307,12 @@ static void __init resiliency_test(void)
108872 validate_slab_cache(kmalloc_caches[9]);
108873 }
108874 #else
108875-#ifdef CONFIG_SYSFS
108876+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
108877 static void resiliency_test(void) {};
108878 #endif
108879 #endif
108880
108881-#ifdef CONFIG_SYSFS
108882+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
108883 enum slab_stat_type {
108884 SL_ALL, /* All slabs */
108885 SL_PARTIAL, /* Only partially allocated slabs */
108886@@ -4480,13 +4549,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
108887 {
108888 if (!s->ctor)
108889 return 0;
108890+#ifdef CONFIG_GRKERNSEC_HIDESYM
108891+ return sprintf(buf, "%pS\n", NULL);
108892+#else
108893 return sprintf(buf, "%pS\n", s->ctor);
108894+#endif
108895 }
108896 SLAB_ATTR_RO(ctor);
108897
108898 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
108899 {
108900- return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
108901+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) < 0 ? 0 : atomic_read(&s->refcount) - 1);
108902 }
108903 SLAB_ATTR_RO(aliases);
108904
108905@@ -4574,6 +4647,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
108906 SLAB_ATTR_RO(cache_dma);
108907 #endif
108908
108909+#ifdef CONFIG_PAX_USERCOPY_SLABS
108910+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
108911+{
108912+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
108913+}
108914+SLAB_ATTR_RO(usercopy);
108915+#endif
108916+
108917+#ifdef CONFIG_PAX_MEMORY_SANITIZE
108918+static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
108919+{
108920+ return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
108921+}
108922+SLAB_ATTR_RO(sanitize);
108923+#endif
108924+
108925 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
108926 {
108927 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
108928@@ -4629,7 +4718,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
108929 * as well as cause other issues like converting a mergeable
108930 * cache into an umergeable one.
108931 */
108932- if (s->refcount > 1)
108933+ if (atomic_read(&s->refcount) > 1)
108934 return -EINVAL;
108935
108936 s->flags &= ~SLAB_TRACE;
108937@@ -4749,7 +4838,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
108938 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
108939 size_t length)
108940 {
108941- if (s->refcount > 1)
108942+ if (atomic_read(&s->refcount) > 1)
108943 return -EINVAL;
108944
108945 s->flags &= ~SLAB_FAILSLAB;
108946@@ -4916,6 +5005,12 @@ static struct attribute *slab_attrs[] = {
108947 #ifdef CONFIG_ZONE_DMA
108948 &cache_dma_attr.attr,
108949 #endif
108950+#ifdef CONFIG_PAX_USERCOPY_SLABS
108951+ &usercopy_attr.attr,
108952+#endif
108953+#ifdef CONFIG_PAX_MEMORY_SANITIZE
108954+ &sanitize_attr.attr,
108955+#endif
108956 #ifdef CONFIG_NUMA
108957 &remote_node_defrag_ratio_attr.attr,
108958 #endif
108959@@ -5157,6 +5252,7 @@ static char *create_unique_id(struct kmem_cache *s)
108960 return name;
108961 }
108962
108963+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
108964 static int sysfs_slab_add(struct kmem_cache *s)
108965 {
108966 int err;
108967@@ -5230,6 +5326,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
108968 kobject_del(&s->kobj);
108969 kobject_put(&s->kobj);
108970 }
108971+#endif
108972
108973 /*
108974 * Need to buffer aliases during bootup until sysfs becomes
108975@@ -5243,6 +5340,7 @@ struct saved_alias {
108976
108977 static struct saved_alias *alias_list;
108978
108979+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
108980 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
108981 {
108982 struct saved_alias *al;
108983@@ -5265,6 +5363,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
108984 alias_list = al;
108985 return 0;
108986 }
108987+#endif
108988
108989 static int __init slab_sysfs_init(void)
108990 {
108991diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
108992index 4cba9c2..b4f9fcc 100644
108993--- a/mm/sparse-vmemmap.c
108994+++ b/mm/sparse-vmemmap.c
108995@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
108996 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
108997 if (!p)
108998 return NULL;
108999- pud_populate(&init_mm, pud, p);
109000+ pud_populate_kernel(&init_mm, pud, p);
109001 }
109002 return pud;
109003 }
109004@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
109005 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
109006 if (!p)
109007 return NULL;
109008- pgd_populate(&init_mm, pgd, p);
109009+ pgd_populate_kernel(&init_mm, pgd, p);
109010 }
109011 return pgd;
109012 }
109013diff --git a/mm/sparse.c b/mm/sparse.c
109014index d1b48b6..6e8590e 100644
109015--- a/mm/sparse.c
109016+++ b/mm/sparse.c
109017@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
109018
109019 for (i = 0; i < PAGES_PER_SECTION; i++) {
109020 if (PageHWPoison(&memmap[i])) {
109021- atomic_long_sub(1, &num_poisoned_pages);
109022+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
109023 ClearPageHWPoison(&memmap[i]);
109024 }
109025 }
109026diff --git a/mm/swap.c b/mm/swap.c
109027index cd3a5e6..40c0c8f 100644
109028--- a/mm/swap.c
109029+++ b/mm/swap.c
109030@@ -31,6 +31,7 @@
109031 #include <linux/memcontrol.h>
109032 #include <linux/gfp.h>
109033 #include <linux/uio.h>
109034+#include <linux/hugetlb.h>
109035
109036 #include "internal.h"
109037
109038@@ -77,6 +78,8 @@ static void __put_compound_page(struct page *page)
109039
109040 __page_cache_release(page);
109041 dtor = get_compound_page_dtor(page);
109042+ if (!PageHuge(page))
109043+ BUG_ON(dtor != free_compound_page);
109044 (*dtor)(page);
109045 }
109046
109047diff --git a/mm/swapfile.c b/mm/swapfile.c
109048index 63f55cc..31874e6 100644
109049--- a/mm/swapfile.c
109050+++ b/mm/swapfile.c
109051@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
109052
109053 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
109054 /* Activity counter to indicate that a swapon or swapoff has occurred */
109055-static atomic_t proc_poll_event = ATOMIC_INIT(0);
109056+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
109057
109058 static inline unsigned char swap_count(unsigned char ent)
109059 {
109060@@ -1944,7 +1944,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
109061 spin_unlock(&swap_lock);
109062
109063 err = 0;
109064- atomic_inc(&proc_poll_event);
109065+ atomic_inc_unchecked(&proc_poll_event);
109066 wake_up_interruptible(&proc_poll_wait);
109067
109068 out_dput:
109069@@ -1961,8 +1961,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
109070
109071 poll_wait(file, &proc_poll_wait, wait);
109072
109073- if (seq->poll_event != atomic_read(&proc_poll_event)) {
109074- seq->poll_event = atomic_read(&proc_poll_event);
109075+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
109076+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
109077 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
109078 }
109079
109080@@ -2060,7 +2060,7 @@ static int swaps_open(struct inode *inode, struct file *file)
109081 return ret;
109082
109083 seq = file->private_data;
109084- seq->poll_event = atomic_read(&proc_poll_event);
109085+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
109086 return 0;
109087 }
109088
109089@@ -2520,7 +2520,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
109090 (frontswap_map) ? "FS" : "");
109091
109092 mutex_unlock(&swapon_mutex);
109093- atomic_inc(&proc_poll_event);
109094+ atomic_inc_unchecked(&proc_poll_event);
109095 wake_up_interruptible(&proc_poll_wait);
109096
109097 if (S_ISREG(inode->i_mode))
109098diff --git a/mm/util.c b/mm/util.c
109099index 3981ae9..28b585b 100644
109100--- a/mm/util.c
109101+++ b/mm/util.c
109102@@ -233,6 +233,12 @@ struct task_struct *task_of_stack(struct task_struct *task,
109103 void arch_pick_mmap_layout(struct mm_struct *mm)
109104 {
109105 mm->mmap_base = TASK_UNMAPPED_BASE;
109106+
109107+#ifdef CONFIG_PAX_RANDMMAP
109108+ if (mm->pax_flags & MF_PAX_RANDMMAP)
109109+ mm->mmap_base += mm->delta_mmap;
109110+#endif
109111+
109112 mm->get_unmapped_area = arch_get_unmapped_area;
109113 }
109114 #endif
109115@@ -403,6 +409,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
109116 if (!mm->arg_end)
109117 goto out_mm; /* Shh! No looking before we're done */
109118
109119+ if (gr_acl_handle_procpidmem(task))
109120+ goto out_mm;
109121+
109122 len = mm->arg_end - mm->arg_start;
109123
109124 if (len > buflen)
109125diff --git a/mm/vmalloc.c b/mm/vmalloc.c
109126index 49abccf..7bd1931 100644
109127--- a/mm/vmalloc.c
109128+++ b/mm/vmalloc.c
109129@@ -39,20 +39,65 @@ struct vfree_deferred {
109130 struct work_struct wq;
109131 };
109132 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
109133+static DEFINE_PER_CPU(struct vfree_deferred, vunmap_deferred);
109134+
109135+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
109136+struct stack_deferred_llist {
109137+ struct llist_head list;
109138+ void *stack;
109139+ void *lowmem_stack;
109140+};
109141+
109142+struct stack_deferred {
109143+ struct stack_deferred_llist list;
109144+ struct work_struct wq;
109145+};
109146+
109147+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
109148+#endif
109149
109150 static void __vunmap(const void *, int);
109151
109152-static void free_work(struct work_struct *w)
109153+static void vfree_work(struct work_struct *w)
109154 {
109155 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
109156 struct llist_node *llnode = llist_del_all(&p->list);
109157 while (llnode) {
109158- void *p = llnode;
109159+ void *x = llnode;
109160 llnode = llist_next(llnode);
109161- __vunmap(p, 1);
109162+ __vunmap(x, 1);
109163 }
109164 }
109165
109166+static void vunmap_work(struct work_struct *w)
109167+{
109168+ struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
109169+ struct llist_node *llnode = llist_del_all(&p->list);
109170+ while (llnode) {
109171+ void *x = llnode;
109172+ llnode = llist_next(llnode);
109173+ __vunmap(x, 0);
109174+ }
109175+}
109176+
109177+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
109178+static void unmap_work(struct work_struct *w)
109179+{
109180+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
109181+ struct llist_node *llnode = llist_del_all(&p->list.list);
109182+ while (llnode) {
109183+ struct stack_deferred_llist *x =
109184+ llist_entry((struct llist_head *)llnode,
109185+ struct stack_deferred_llist, list);
109186+ void *stack = ACCESS_ONCE(x->stack);
109187+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
109188+ llnode = llist_next(llnode);
109189+ __vunmap(stack, 0);
109190+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
109191+ }
109192+}
109193+#endif
109194+
109195 /*** Page table manipulation functions ***/
109196
109197 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
109198@@ -61,8 +106,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
109199
109200 pte = pte_offset_kernel(pmd, addr);
109201 do {
109202- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
109203- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
109204+
109205+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
109206+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
109207+ BUG_ON(!pte_exec(*pte));
109208+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
109209+ continue;
109210+ }
109211+#endif
109212+
109213+ {
109214+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
109215+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
109216+ }
109217 } while (pte++, addr += PAGE_SIZE, addr != end);
109218 }
109219
109220@@ -122,16 +178,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
109221 pte = pte_alloc_kernel(pmd, addr);
109222 if (!pte)
109223 return -ENOMEM;
109224+
109225+ pax_open_kernel();
109226 do {
109227 struct page *page = pages[*nr];
109228
109229- if (WARN_ON(!pte_none(*pte)))
109230+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
109231+ if (pgprot_val(prot) & _PAGE_NX)
109232+#endif
109233+
109234+ if (!pte_none(*pte)) {
109235+ pax_close_kernel();
109236+ WARN_ON(1);
109237 return -EBUSY;
109238- if (WARN_ON(!page))
109239+ }
109240+ if (!page) {
109241+ pax_close_kernel();
109242+ WARN_ON(1);
109243 return -ENOMEM;
109244+ }
109245 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
109246 (*nr)++;
109247 } while (pte++, addr += PAGE_SIZE, addr != end);
109248+ pax_close_kernel();
109249 return 0;
109250 }
109251
109252@@ -141,7 +210,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
109253 pmd_t *pmd;
109254 unsigned long next;
109255
109256- pmd = pmd_alloc(&init_mm, pud, addr);
109257+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
109258 if (!pmd)
109259 return -ENOMEM;
109260 do {
109261@@ -158,7 +227,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
109262 pud_t *pud;
109263 unsigned long next;
109264
109265- pud = pud_alloc(&init_mm, pgd, addr);
109266+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
109267 if (!pud)
109268 return -ENOMEM;
109269 do {
109270@@ -218,6 +287,12 @@ int is_vmalloc_or_module_addr(const void *x)
109271 if (addr >= MODULES_VADDR && addr < MODULES_END)
109272 return 1;
109273 #endif
109274+
109275+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
109276+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
109277+ return 1;
109278+#endif
109279+
109280 return is_vmalloc_addr(x);
109281 }
109282
109283@@ -238,8 +313,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
109284
109285 if (!pgd_none(*pgd)) {
109286 pud_t *pud = pud_offset(pgd, addr);
109287+#ifdef CONFIG_X86
109288+ if (!pud_large(*pud))
109289+#endif
109290 if (!pud_none(*pud)) {
109291 pmd_t *pmd = pmd_offset(pud, addr);
109292+#ifdef CONFIG_X86
109293+ if (!pmd_large(*pmd))
109294+#endif
109295 if (!pmd_none(*pmd)) {
109296 pte_t *ptep, pte;
109297
109298@@ -341,7 +422,7 @@ static void purge_vmap_area_lazy(void);
109299 * Allocate a region of KVA of the specified size and alignment, within the
109300 * vstart and vend.
109301 */
109302-static struct vmap_area *alloc_vmap_area(unsigned long size,
109303+static struct vmap_area * __size_overflow(1) alloc_vmap_area(unsigned long size,
109304 unsigned long align,
109305 unsigned long vstart, unsigned long vend,
109306 int node, gfp_t gfp_mask)
109307@@ -1182,13 +1263,27 @@ void __init vmalloc_init(void)
109308 for_each_possible_cpu(i) {
109309 struct vmap_block_queue *vbq;
109310 struct vfree_deferred *p;
109311+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
109312+ struct stack_deferred *p2;
109313+#endif
109314
109315 vbq = &per_cpu(vmap_block_queue, i);
109316 spin_lock_init(&vbq->lock);
109317 INIT_LIST_HEAD(&vbq->free);
109318+
109319 p = &per_cpu(vfree_deferred, i);
109320 init_llist_head(&p->list);
109321- INIT_WORK(&p->wq, free_work);
109322+ INIT_WORK(&p->wq, vfree_work);
109323+
109324+ p = &per_cpu(vunmap_deferred, i);
109325+ init_llist_head(&p->list);
109326+ INIT_WORK(&p->wq, vunmap_work);
109327+
109328+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
109329+ p2 = &per_cpu(stack_deferred, i);
109330+ init_llist_head(&p2->list.list);
109331+ INIT_WORK(&p2->wq, unmap_work);
109332+#endif
109333 }
109334
109335 /* Import existing vmlist entries. */
109336@@ -1313,6 +1408,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
109337 struct vm_struct *area;
109338
109339 BUG_ON(in_interrupt());
109340+
109341+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
109342+ if (flags & VM_KERNEXEC) {
109343+ if (start != VMALLOC_START || end != VMALLOC_END)
109344+ return NULL;
109345+ start = (unsigned long)MODULES_EXEC_VADDR;
109346+ end = (unsigned long)MODULES_EXEC_END;
109347+ }
109348+#endif
109349+
109350 if (flags & VM_IOREMAP)
109351 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
109352
109353@@ -1510,13 +1615,36 @@ EXPORT_SYMBOL(vfree);
109354 */
109355 void vunmap(const void *addr)
109356 {
109357- BUG_ON(in_interrupt());
109358- might_sleep();
109359- if (addr)
109360+ if (!addr)
109361+ return;
109362+ if (unlikely(in_interrupt())) {
109363+ struct vfree_deferred *p = this_cpu_ptr(&vunmap_deferred);
109364+ if (llist_add((struct llist_node *)addr, &p->list))
109365+ schedule_work(&p->wq);
109366+ } else {
109367+ might_sleep();
109368 __vunmap(addr, 0);
109369+ }
109370 }
109371 EXPORT_SYMBOL(vunmap);
109372
109373+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
109374+void unmap_process_stacks(struct task_struct *task)
109375+{
109376+ if (unlikely(in_interrupt())) {
109377+ struct stack_deferred *p = this_cpu_ptr(&stack_deferred);
109378+ struct stack_deferred_llist *list = task->stack;
109379+ list->stack = task->stack;
109380+ list->lowmem_stack = task->lowmem_stack;
109381+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
109382+ schedule_work(&p->wq);
109383+ } else {
109384+ __vunmap(task->stack, 0);
109385+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
109386+ }
109387+}
109388+#endif
109389+
109390 /**
109391 * vmap - map an array of pages into virtually contiguous space
109392 * @pages: array of page pointers
109393@@ -1537,6 +1665,11 @@ void *vmap(struct page **pages, unsigned int count,
109394 if (count > totalram_pages)
109395 return NULL;
109396
109397+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
109398+ if (!(pgprot_val(prot) & _PAGE_NX))
109399+ flags |= VM_KERNEXEC;
109400+#endif
109401+
109402 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
109403 __builtin_return_address(0));
109404 if (!area)
109405@@ -1641,6 +1774,14 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
109406 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
109407 goto fail;
109408
109409+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
109410+ if (!(pgprot_val(prot) & _PAGE_NX)) {
109411+ vm_flags |= VM_KERNEXEC;
109412+ start = VMALLOC_START;
109413+ end = VMALLOC_END;
109414+ }
109415+#endif
109416+
109417 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
109418 vm_flags, start, end, node, gfp_mask, caller);
109419 if (!area)
109420@@ -1817,10 +1958,9 @@ EXPORT_SYMBOL(vzalloc_node);
109421 * For tight control over page level allocator and protection flags
109422 * use __vmalloc() instead.
109423 */
109424-
109425 void *vmalloc_exec(unsigned long size)
109426 {
109427- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
109428+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
109429 NUMA_NO_NODE, __builtin_return_address(0));
109430 }
109431
109432@@ -2127,6 +2267,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
109433 {
109434 struct vm_struct *area;
109435
109436+ BUG_ON(vma->vm_mirror);
109437+
109438 size = PAGE_ALIGN(size);
109439
109440 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
109441@@ -2609,7 +2751,11 @@ static int s_show(struct seq_file *m, void *p)
109442 v->addr, v->addr + v->size, v->size);
109443
109444 if (v->caller)
109445+#ifdef CONFIG_GRKERNSEC_HIDESYM
109446+ seq_printf(m, " %pK", v->caller);
109447+#else
109448 seq_printf(m, " %pS", v->caller);
109449+#endif
109450
109451 if (v->nr_pages)
109452 seq_printf(m, " pages=%d", v->nr_pages);
109453diff --git a/mm/vmstat.c b/mm/vmstat.c
109454index 4f5cd97..9fb715a 100644
109455--- a/mm/vmstat.c
109456+++ b/mm/vmstat.c
109457@@ -27,6 +27,7 @@
109458 #include <linux/mm_inline.h>
109459 #include <linux/page_ext.h>
109460 #include <linux/page_owner.h>
109461+#include <linux/grsecurity.h>
109462
109463 #include "internal.h"
109464
109465@@ -86,7 +87,7 @@ void vm_events_fold_cpu(int cpu)
109466 *
109467 * vm_stat contains the global counters
109468 */
109469-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
109470+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
109471 EXPORT_SYMBOL(vm_stat);
109472
109473 #ifdef CONFIG_SMP
109474@@ -438,7 +439,7 @@ static int fold_diff(int *diff)
109475
109476 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
109477 if (diff[i]) {
109478- atomic_long_add(diff[i], &vm_stat[i]);
109479+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
109480 changes++;
109481 }
109482 return changes;
109483@@ -476,7 +477,7 @@ static int refresh_cpu_vm_stats(void)
109484 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
109485 if (v) {
109486
109487- atomic_long_add(v, &zone->vm_stat[i]);
109488+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
109489 global_diff[i] += v;
109490 #ifdef CONFIG_NUMA
109491 /* 3 seconds idle till flush */
109492@@ -540,7 +541,7 @@ void cpu_vm_stats_fold(int cpu)
109493
109494 v = p->vm_stat_diff[i];
109495 p->vm_stat_diff[i] = 0;
109496- atomic_long_add(v, &zone->vm_stat[i]);
109497+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
109498 global_diff[i] += v;
109499 }
109500 }
109501@@ -560,8 +561,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
109502 if (pset->vm_stat_diff[i]) {
109503 int v = pset->vm_stat_diff[i];
109504 pset->vm_stat_diff[i] = 0;
109505- atomic_long_add(v, &zone->vm_stat[i]);
109506- atomic_long_add(v, &vm_stat[i]);
109507+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
109508+ atomic_long_add_unchecked(v, &vm_stat[i]);
109509 }
109510 }
109511 #endif
109512@@ -1293,10 +1294,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
109513 stat_items_size += sizeof(struct vm_event_state);
109514 #endif
109515
109516- v = kmalloc(stat_items_size, GFP_KERNEL);
109517+ v = kzalloc(stat_items_size, GFP_KERNEL);
109518 m->private = v;
109519 if (!v)
109520 return ERR_PTR(-ENOMEM);
109521+
109522+#ifdef CONFIG_GRKERNSEC_PROC_ADD
109523+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
109524+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
109525+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
109526+ && !in_group_p(grsec_proc_gid)
109527+#endif
109528+ )
109529+ return (unsigned long *)m->private + *pos;
109530+#endif
109531+#endif
109532+
109533 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
109534 v[i] = global_page_state(i);
109535 v += NR_VM_ZONE_STAT_ITEMS;
109536@@ -1528,10 +1541,16 @@ static int __init setup_vmstat(void)
109537 cpu_notifier_register_done();
109538 #endif
109539 #ifdef CONFIG_PROC_FS
109540- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
109541- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
109542- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
109543- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
109544+ {
109545+ mode_t gr_mode = S_IRUGO;
109546+#ifdef CONFIG_GRKERNSEC_PROC_ADD
109547+ gr_mode = S_IRUSR;
109548+#endif
109549+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
109550+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
109551+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
109552+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
109553+ }
109554 #endif
109555 return 0;
109556 }
109557diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
109558index 64c6bed..b79a5de 100644
109559--- a/net/8021q/vlan.c
109560+++ b/net/8021q/vlan.c
109561@@ -481,7 +481,7 @@ out:
109562 return NOTIFY_DONE;
109563 }
109564
109565-static struct notifier_block vlan_notifier_block __read_mostly = {
109566+static struct notifier_block vlan_notifier_block = {
109567 .notifier_call = vlan_device_event,
109568 };
109569
109570@@ -556,8 +556,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
109571 err = -EPERM;
109572 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
109573 break;
109574- if ((args.u.name_type >= 0) &&
109575- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
109576+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
109577 struct vlan_net *vn;
109578
109579 vn = net_generic(net, vlan_net_id);
109580diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
109581index c92b52f..006c052 100644
109582--- a/net/8021q/vlan_netlink.c
109583+++ b/net/8021q/vlan_netlink.c
109584@@ -245,7 +245,7 @@ static struct net *vlan_get_link_net(const struct net_device *dev)
109585 return dev_net(real_dev);
109586 }
109587
109588-struct rtnl_link_ops vlan_link_ops __read_mostly = {
109589+struct rtnl_link_ops vlan_link_ops = {
109590 .kind = "vlan",
109591 .maxtype = IFLA_VLAN_MAX,
109592 .policy = vlan_policy,
109593diff --git a/net/9p/client.c b/net/9p/client.c
109594index e86a9bea..e91f70e 100644
109595--- a/net/9p/client.c
109596+++ b/net/9p/client.c
109597@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
109598 len - inline_len);
109599 } else {
109600 err = copy_from_user(ename + inline_len,
109601- uidata, len - inline_len);
109602+ (char __force_user *)uidata, len - inline_len);
109603 if (err) {
109604 err = -EFAULT;
109605 goto out_err;
109606@@ -1570,7 +1570,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
109607 kernel_buf = 1;
109608 indata = data;
109609 } else
109610- indata = (__force char *)udata;
109611+ indata = (__force_kernel char *)udata;
109612 /*
109613 * response header len is 11
109614 * PDU Header(7) + IO Size (4)
109615@@ -1645,7 +1645,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
109616 kernel_buf = 1;
109617 odata = data;
109618 } else
109619- odata = (char *)udata;
109620+ odata = (char __force_kernel *)udata;
109621 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
109622 P9_ZC_HDR_SZ, kernel_buf, "dqd",
109623 fid->fid, offset, rsize);
109624diff --git a/net/9p/mod.c b/net/9p/mod.c
109625index 6ab36ae..6f1841b 100644
109626--- a/net/9p/mod.c
109627+++ b/net/9p/mod.c
109628@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
109629 void v9fs_register_trans(struct p9_trans_module *m)
109630 {
109631 spin_lock(&v9fs_trans_lock);
109632- list_add_tail(&m->list, &v9fs_trans_list);
109633+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
109634 spin_unlock(&v9fs_trans_lock);
109635 }
109636 EXPORT_SYMBOL(v9fs_register_trans);
109637@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
109638 void v9fs_unregister_trans(struct p9_trans_module *m)
109639 {
109640 spin_lock(&v9fs_trans_lock);
109641- list_del_init(&m->list);
109642+ pax_list_del_init((struct list_head *)&m->list);
109643 spin_unlock(&v9fs_trans_lock);
109644 }
109645 EXPORT_SYMBOL(v9fs_unregister_trans);
109646diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
109647index 80d08f6..de63fd1 100644
109648--- a/net/9p/trans_fd.c
109649+++ b/net/9p/trans_fd.c
109650@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
109651 oldfs = get_fs();
109652 set_fs(get_ds());
109653 /* The cast to a user pointer is valid due to the set_fs() */
109654- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
109655+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
109656 set_fs(oldfs);
109657
109658 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
109659diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
109660index af46bc4..f9adfcd 100644
109661--- a/net/appletalk/atalk_proc.c
109662+++ b/net/appletalk/atalk_proc.c
109663@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
109664 struct proc_dir_entry *p;
109665 int rc = -ENOMEM;
109666
109667- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
109668+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
109669 if (!atalk_proc_dir)
109670 goto out;
109671
109672diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
109673index 876fbe8..8bbea9f 100644
109674--- a/net/atm/atm_misc.c
109675+++ b/net/atm/atm_misc.c
109676@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
109677 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
109678 return 1;
109679 atm_return(vcc, truesize);
109680- atomic_inc(&vcc->stats->rx_drop);
109681+ atomic_inc_unchecked(&vcc->stats->rx_drop);
109682 return 0;
109683 }
109684 EXPORT_SYMBOL(atm_charge);
109685@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
109686 }
109687 }
109688 atm_return(vcc, guess);
109689- atomic_inc(&vcc->stats->rx_drop);
109690+ atomic_inc_unchecked(&vcc->stats->rx_drop);
109691 return NULL;
109692 }
109693 EXPORT_SYMBOL(atm_alloc_charge);
109694@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
109695
109696 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
109697 {
109698-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
109699+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
109700 __SONET_ITEMS
109701 #undef __HANDLE_ITEM
109702 }
109703@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
109704
109705 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
109706 {
109707-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
109708+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
109709 __SONET_ITEMS
109710 #undef __HANDLE_ITEM
109711 }
109712diff --git a/net/atm/lec.c b/net/atm/lec.c
109713index 4b98f89..5a2f6cb 100644
109714--- a/net/atm/lec.c
109715+++ b/net/atm/lec.c
109716@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
109717 }
109718
109719 static struct lane2_ops lane2_ops = {
109720- lane2_resolve, /* resolve, spec 3.1.3 */
109721- lane2_associate_req, /* associate_req, spec 3.1.4 */
109722- NULL /* associate indicator, spec 3.1.5 */
109723+ .resolve = lane2_resolve,
109724+ .associate_req = lane2_associate_req,
109725+ .associate_indicator = NULL
109726 };
109727
109728 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
109729diff --git a/net/atm/lec.h b/net/atm/lec.h
109730index 4149db1..f2ab682 100644
109731--- a/net/atm/lec.h
109732+++ b/net/atm/lec.h
109733@@ -48,7 +48,7 @@ struct lane2_ops {
109734 const u8 *tlvs, u32 sizeoftlvs);
109735 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
109736 const u8 *tlvs, u32 sizeoftlvs);
109737-};
109738+} __no_const;
109739
109740 /*
109741 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
109742diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
109743index d1b2d9a..d549f7f 100644
109744--- a/net/atm/mpoa_caches.c
109745+++ b/net/atm/mpoa_caches.c
109746@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
109747
109748
109749 static struct in_cache_ops ingress_ops = {
109750- in_cache_add_entry, /* add_entry */
109751- in_cache_get, /* get */
109752- in_cache_get_with_mask, /* get_with_mask */
109753- in_cache_get_by_vcc, /* get_by_vcc */
109754- in_cache_put, /* put */
109755- in_cache_remove_entry, /* remove_entry */
109756- cache_hit, /* cache_hit */
109757- clear_count_and_expired, /* clear_count */
109758- check_resolving_entries, /* check_resolving */
109759- refresh_entries, /* refresh */
109760- in_destroy_cache /* destroy_cache */
109761+ .add_entry = in_cache_add_entry,
109762+ .get = in_cache_get,
109763+ .get_with_mask = in_cache_get_with_mask,
109764+ .get_by_vcc = in_cache_get_by_vcc,
109765+ .put = in_cache_put,
109766+ .remove_entry = in_cache_remove_entry,
109767+ .cache_hit = cache_hit,
109768+ .clear_count = clear_count_and_expired,
109769+ .check_resolving = check_resolving_entries,
109770+ .refresh = refresh_entries,
109771+ .destroy_cache = in_destroy_cache
109772 };
109773
109774 static struct eg_cache_ops egress_ops = {
109775- eg_cache_add_entry, /* add_entry */
109776- eg_cache_get_by_cache_id, /* get_by_cache_id */
109777- eg_cache_get_by_tag, /* get_by_tag */
109778- eg_cache_get_by_vcc, /* get_by_vcc */
109779- eg_cache_get_by_src_ip, /* get_by_src_ip */
109780- eg_cache_put, /* put */
109781- eg_cache_remove_entry, /* remove_entry */
109782- update_eg_cache_entry, /* update */
109783- clear_expired, /* clear_expired */
109784- eg_destroy_cache /* destroy_cache */
109785+ .add_entry = eg_cache_add_entry,
109786+ .get_by_cache_id = eg_cache_get_by_cache_id,
109787+ .get_by_tag = eg_cache_get_by_tag,
109788+ .get_by_vcc = eg_cache_get_by_vcc,
109789+ .get_by_src_ip = eg_cache_get_by_src_ip,
109790+ .put = eg_cache_put,
109791+ .remove_entry = eg_cache_remove_entry,
109792+ .update = update_eg_cache_entry,
109793+ .clear_expired = clear_expired,
109794+ .destroy_cache = eg_destroy_cache
109795 };
109796
109797
109798diff --git a/net/atm/proc.c b/net/atm/proc.c
109799index bbb6461..cf04016 100644
109800--- a/net/atm/proc.c
109801+++ b/net/atm/proc.c
109802@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
109803 const struct k_atm_aal_stats *stats)
109804 {
109805 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
109806- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
109807- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
109808- atomic_read(&stats->rx_drop));
109809+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
109810+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
109811+ atomic_read_unchecked(&stats->rx_drop));
109812 }
109813
109814 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
109815diff --git a/net/atm/resources.c b/net/atm/resources.c
109816index 0447d5d..3cf4728 100644
109817--- a/net/atm/resources.c
109818+++ b/net/atm/resources.c
109819@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
109820 static void copy_aal_stats(struct k_atm_aal_stats *from,
109821 struct atm_aal_stats *to)
109822 {
109823-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
109824+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
109825 __AAL_STAT_ITEMS
109826 #undef __HANDLE_ITEM
109827 }
109828@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
109829 static void subtract_aal_stats(struct k_atm_aal_stats *from,
109830 struct atm_aal_stats *to)
109831 {
109832-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
109833+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
109834 __AAL_STAT_ITEMS
109835 #undef __HANDLE_ITEM
109836 }
109837diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
109838index 919a5ce..cc6b444 100644
109839--- a/net/ax25/sysctl_net_ax25.c
109840+++ b/net/ax25/sysctl_net_ax25.c
109841@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
109842 {
109843 char path[sizeof("net/ax25/") + IFNAMSIZ];
109844 int k;
109845- struct ctl_table *table;
109846+ ctl_table_no_const *table;
109847
109848 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
109849 if (!table)
109850diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
109851index 00e00e0..710fcd2 100644
109852--- a/net/batman-adv/bat_iv_ogm.c
109853+++ b/net/batman-adv/bat_iv_ogm.c
109854@@ -312,7 +312,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
109855
109856 /* randomize initial seqno to avoid collision */
109857 get_random_bytes(&random_seqno, sizeof(random_seqno));
109858- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
109859+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
109860
109861 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
109862 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
109863@@ -917,9 +917,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
109864 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
109865
109866 /* change sequence number to network order */
109867- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
109868+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
109869 batadv_ogm_packet->seqno = htonl(seqno);
109870- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
109871+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
109872
109873 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
109874
109875@@ -1596,7 +1596,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
109876 return;
109877
109878 /* could be changed by schedule_own_packet() */
109879- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
109880+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
109881
109882 if (ogm_packet->flags & BATADV_DIRECTLINK)
109883 has_directlink_flag = true;
109884diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
109885index 3d1dcaa..4699f4e 100644
109886--- a/net/batman-adv/fragmentation.c
109887+++ b/net/batman-adv/fragmentation.c
109888@@ -449,7 +449,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
109889 frag_header.packet_type = BATADV_UNICAST_FRAG;
109890 frag_header.version = BATADV_COMPAT_VERSION;
109891 frag_header.ttl = BATADV_TTL;
109892- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
109893+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
109894 frag_header.reserved = 0;
109895 frag_header.no = 0;
109896 frag_header.total_size = htons(skb->len);
109897diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
109898index 5ec31d7..e371631 100644
109899--- a/net/batman-adv/soft-interface.c
109900+++ b/net/batman-adv/soft-interface.c
109901@@ -295,7 +295,7 @@ send:
109902 primary_if->net_dev->dev_addr);
109903
109904 /* set broadcast sequence number */
109905- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
109906+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
109907 bcast_packet->seqno = htonl(seqno);
109908
109909 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
109910@@ -760,7 +760,7 @@ static int batadv_softif_init_late(struct net_device *dev)
109911 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
109912
109913 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
109914- atomic_set(&bat_priv->bcast_seqno, 1);
109915+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
109916 atomic_set(&bat_priv->tt.vn, 0);
109917 atomic_set(&bat_priv->tt.local_changes, 0);
109918 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
109919@@ -774,7 +774,7 @@ static int batadv_softif_init_late(struct net_device *dev)
109920
109921 /* randomize initial seqno to avoid collision */
109922 get_random_bytes(&random_seqno, sizeof(random_seqno));
109923- atomic_set(&bat_priv->frag_seqno, random_seqno);
109924+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
109925
109926 bat_priv->primary_if = NULL;
109927 bat_priv->num_ifaces = 0;
109928@@ -982,7 +982,7 @@ int batadv_softif_is_valid(const struct net_device *net_dev)
109929 return 0;
109930 }
109931
109932-struct rtnl_link_ops batadv_link_ops __read_mostly = {
109933+struct rtnl_link_ops batadv_link_ops = {
109934 .kind = "batadv",
109935 .priv_size = sizeof(struct batadv_priv),
109936 .setup = batadv_softif_init_early,
109937diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
109938index 9398c3f..0e79657 100644
109939--- a/net/batman-adv/types.h
109940+++ b/net/batman-adv/types.h
109941@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
109942 struct batadv_hard_iface_bat_iv {
109943 unsigned char *ogm_buff;
109944 int ogm_buff_len;
109945- atomic_t ogm_seqno;
109946+ atomic_unchecked_t ogm_seqno;
109947 };
109948
109949 /**
109950@@ -766,7 +766,7 @@ struct batadv_priv {
109951 atomic_t bonding;
109952 atomic_t fragmentation;
109953 atomic_t packet_size_max;
109954- atomic_t frag_seqno;
109955+ atomic_unchecked_t frag_seqno;
109956 #ifdef CONFIG_BATMAN_ADV_BLA
109957 atomic_t bridge_loop_avoidance;
109958 #endif
109959@@ -785,7 +785,7 @@ struct batadv_priv {
109960 #endif
109961 uint32_t isolation_mark;
109962 uint32_t isolation_mark_mask;
109963- atomic_t bcast_seqno;
109964+ atomic_unchecked_t bcast_seqno;
109965 atomic_t bcast_queue_left;
109966 atomic_t batman_queue_left;
109967 char num_ifaces;
109968diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
109969index 1d65c5b..43e55fd 100644
109970--- a/net/bluetooth/hci_sock.c
109971+++ b/net/bluetooth/hci_sock.c
109972@@ -1042,7 +1042,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
109973 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
109974 }
109975
109976- len = min_t(unsigned int, len, sizeof(uf));
109977+ len = min((size_t)len, sizeof(uf));
109978 if (copy_from_user(&uf, optval, len)) {
109979 err = -EFAULT;
109980 break;
109981diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
109982index 6ba33f9..4afc26f 100644
109983--- a/net/bluetooth/l2cap_core.c
109984+++ b/net/bluetooth/l2cap_core.c
109985@@ -3534,8 +3534,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
109986 break;
109987
109988 case L2CAP_CONF_RFC:
109989- if (olen == sizeof(rfc))
109990- memcpy(&rfc, (void *)val, olen);
109991+ if (olen != sizeof(rfc))
109992+ break;
109993+
109994+ memcpy(&rfc, (void *)val, olen);
109995
109996 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
109997 rfc.mode != chan->mode)
109998diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
109999index 60694f0..32623ed 100644
110000--- a/net/bluetooth/l2cap_sock.c
110001+++ b/net/bluetooth/l2cap_sock.c
110002@@ -633,7 +633,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
110003 struct sock *sk = sock->sk;
110004 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
110005 struct l2cap_options opts;
110006- int len, err = 0;
110007+ int err = 0;
110008+ size_t len = optlen;
110009 u32 opt;
110010
110011 BT_DBG("sk %p", sk);
110012@@ -660,7 +661,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
110013 opts.max_tx = chan->max_tx;
110014 opts.txwin_size = chan->tx_win;
110015
110016- len = min_t(unsigned int, sizeof(opts), optlen);
110017+ len = min(sizeof(opts), len);
110018 if (copy_from_user((char *) &opts, optval, len)) {
110019 err = -EFAULT;
110020 break;
110021@@ -747,7 +748,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
110022 struct bt_security sec;
110023 struct bt_power pwr;
110024 struct l2cap_conn *conn;
110025- int len, err = 0;
110026+ int err = 0;
110027+ size_t len = optlen;
110028 u32 opt;
110029
110030 BT_DBG("sk %p", sk);
110031@@ -771,7 +773,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
110032
110033 sec.level = BT_SECURITY_LOW;
110034
110035- len = min_t(unsigned int, sizeof(sec), optlen);
110036+ len = min(sizeof(sec), len);
110037 if (copy_from_user((char *) &sec, optval, len)) {
110038 err = -EFAULT;
110039 break;
110040@@ -867,7 +869,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
110041
110042 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
110043
110044- len = min_t(unsigned int, sizeof(pwr), optlen);
110045+ len = min(sizeof(pwr), len);
110046 if (copy_from_user((char *) &pwr, optval, len)) {
110047 err = -EFAULT;
110048 break;
110049diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
110050index 3c6d2c8..6afc970 100644
110051--- a/net/bluetooth/rfcomm/sock.c
110052+++ b/net/bluetooth/rfcomm/sock.c
110053@@ -686,7 +686,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
110054 struct sock *sk = sock->sk;
110055 struct bt_security sec;
110056 int err = 0;
110057- size_t len;
110058+ size_t len = optlen;
110059 u32 opt;
110060
110061 BT_DBG("sk %p", sk);
110062@@ -708,7 +708,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
110063
110064 sec.level = BT_SECURITY_LOW;
110065
110066- len = min_t(unsigned int, sizeof(sec), optlen);
110067+ len = min(sizeof(sec), len);
110068 if (copy_from_user((char *) &sec, optval, len)) {
110069 err = -EFAULT;
110070 break;
110071diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
110072index 8e385a0..a5bdd8e 100644
110073--- a/net/bluetooth/rfcomm/tty.c
110074+++ b/net/bluetooth/rfcomm/tty.c
110075@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
110076 BT_DBG("tty %p id %d", tty, tty->index);
110077
110078 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
110079- dev->channel, dev->port.count);
110080+ dev->channel, atomic_read(&dev->port.count));
110081
110082 err = tty_port_open(&dev->port, tty, filp);
110083 if (err)
110084@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
110085 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
110086
110087 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
110088- dev->port.count);
110089+ atomic_read(&dev->port.count));
110090
110091 tty_port_close(&dev->port, tty, filp);
110092 }
110093diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
110094index 4096089..c602d26 100644
110095--- a/net/bridge/br_mdb.c
110096+++ b/net/bridge/br_mdb.c
110097@@ -371,6 +371,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
110098 if (!p || p->br != br || p->state == BR_STATE_DISABLED)
110099 return -EINVAL;
110100
110101+ memset(&ip, 0, sizeof(ip));
110102 ip.proto = entry->addr.proto;
110103 if (ip.proto == htons(ETH_P_IP))
110104 ip.u.ip4 = entry->addr.u.ip4;
110105@@ -417,6 +418,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
110106 if (!netif_running(br->dev) || br->multicast_disabled)
110107 return -EINVAL;
110108
110109+ memset(&ip, 0, sizeof(ip));
110110 ip.proto = entry->addr.proto;
110111 if (ip.proto == htons(ETH_P_IP)) {
110112 if (timer_pending(&br->ip4_other_query.timer))
110113diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
110114index 4fbcea0..69a6786 100644
110115--- a/net/bridge/br_netlink.c
110116+++ b/net/bridge/br_netlink.c
110117@@ -726,7 +726,7 @@ static struct rtnl_af_ops br_af_ops __read_mostly = {
110118 .get_link_af_size = br_get_link_af_size,
110119 };
110120
110121-struct rtnl_link_ops br_link_ops __read_mostly = {
110122+struct rtnl_link_ops br_link_ops = {
110123 .kind = "bridge",
110124 .priv_size = sizeof(struct net_bridge),
110125 .setup = br_dev_setup,
110126diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
110127index 91180a7..1301daa 100644
110128--- a/net/bridge/netfilter/ebtables.c
110129+++ b/net/bridge/netfilter/ebtables.c
110130@@ -1533,7 +1533,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
110131 tmp.valid_hooks = t->table->valid_hooks;
110132 }
110133 mutex_unlock(&ebt_mutex);
110134- if (copy_to_user(user, &tmp, *len) != 0) {
110135+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
110136 BUGPRINT("c2u Didn't work\n");
110137 ret = -EFAULT;
110138 break;
110139@@ -2339,7 +2339,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
110140 goto out;
110141 tmp.valid_hooks = t->valid_hooks;
110142
110143- if (copy_to_user(user, &tmp, *len) != 0) {
110144+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
110145 ret = -EFAULT;
110146 break;
110147 }
110148@@ -2350,7 +2350,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
110149 tmp.entries_size = t->table->entries_size;
110150 tmp.valid_hooks = t->table->valid_hooks;
110151
110152- if (copy_to_user(user, &tmp, *len) != 0) {
110153+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
110154 ret = -EFAULT;
110155 break;
110156 }
110157diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
110158index f5afda1..dcf770a 100644
110159--- a/net/caif/cfctrl.c
110160+++ b/net/caif/cfctrl.c
110161@@ -10,6 +10,7 @@
110162 #include <linux/spinlock.h>
110163 #include <linux/slab.h>
110164 #include <linux/pkt_sched.h>
110165+#include <linux/sched.h>
110166 #include <net/caif/caif_layer.h>
110167 #include <net/caif/cfpkt.h>
110168 #include <net/caif/cfctrl.h>
110169@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
110170 memset(&dev_info, 0, sizeof(dev_info));
110171 dev_info.id = 0xff;
110172 cfsrvl_init(&this->serv, 0, &dev_info, false);
110173- atomic_set(&this->req_seq_no, 1);
110174- atomic_set(&this->rsp_seq_no, 1);
110175+ atomic_set_unchecked(&this->req_seq_no, 1);
110176+ atomic_set_unchecked(&this->rsp_seq_no, 1);
110177 this->serv.layer.receive = cfctrl_recv;
110178 sprintf(this->serv.layer.name, "ctrl");
110179 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
110180@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
110181 struct cfctrl_request_info *req)
110182 {
110183 spin_lock_bh(&ctrl->info_list_lock);
110184- atomic_inc(&ctrl->req_seq_no);
110185- req->sequence_no = atomic_read(&ctrl->req_seq_no);
110186+ atomic_inc_unchecked(&ctrl->req_seq_no);
110187+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
110188 list_add_tail(&req->list, &ctrl->list);
110189 spin_unlock_bh(&ctrl->info_list_lock);
110190 }
110191@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
110192 if (p != first)
110193 pr_warn("Requests are not received in order\n");
110194
110195- atomic_set(&ctrl->rsp_seq_no,
110196+ atomic_set_unchecked(&ctrl->rsp_seq_no,
110197 p->sequence_no);
110198 list_del(&p->list);
110199 goto out;
110200diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
110201index 67a4a36..8d28068 100644
110202--- a/net/caif/chnl_net.c
110203+++ b/net/caif/chnl_net.c
110204@@ -515,7 +515,7 @@ static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
110205 };
110206
110207
110208-static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
110209+static struct rtnl_link_ops ipcaif_link_ops = {
110210 .kind = "caif",
110211 .priv_size = sizeof(struct chnl_net),
110212 .setup = ipcaif_net_setup,
110213diff --git a/net/can/af_can.c b/net/can/af_can.c
110214index 32d710e..93bcf05 100644
110215--- a/net/can/af_can.c
110216+++ b/net/can/af_can.c
110217@@ -884,7 +884,7 @@ static const struct net_proto_family can_family_ops = {
110218 };
110219
110220 /* notifier block for netdevice event */
110221-static struct notifier_block can_netdev_notifier __read_mostly = {
110222+static struct notifier_block can_netdev_notifier = {
110223 .notifier_call = can_notifier,
110224 };
110225
110226diff --git a/net/can/bcm.c b/net/can/bcm.c
110227index ee9ffd9..dfdf3d4 100644
110228--- a/net/can/bcm.c
110229+++ b/net/can/bcm.c
110230@@ -1619,7 +1619,7 @@ static int __init bcm_module_init(void)
110231 }
110232
110233 /* create /proc/net/can-bcm directory */
110234- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
110235+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
110236 return 0;
110237 }
110238
110239diff --git a/net/can/gw.c b/net/can/gw.c
110240index a6f448e..5902171 100644
110241--- a/net/can/gw.c
110242+++ b/net/can/gw.c
110243@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
110244 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
110245
110246 static HLIST_HEAD(cgw_list);
110247-static struct notifier_block notifier;
110248
110249 static struct kmem_cache *cgw_cache __read_mostly;
110250
110251@@ -948,6 +947,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
110252 return err;
110253 }
110254
110255+static struct notifier_block notifier = {
110256+ .notifier_call = cgw_notifier
110257+};
110258+
110259 static __init int cgw_module_init(void)
110260 {
110261 /* sanitize given module parameter */
110262@@ -963,7 +966,6 @@ static __init int cgw_module_init(void)
110263 return -ENOMEM;
110264
110265 /* set notifier */
110266- notifier.notifier_call = cgw_notifier;
110267 register_netdevice_notifier(&notifier);
110268
110269 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
110270diff --git a/net/can/proc.c b/net/can/proc.c
110271index 1a19b98..df2b4ec 100644
110272--- a/net/can/proc.c
110273+++ b/net/can/proc.c
110274@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
110275 void can_init_proc(void)
110276 {
110277 /* create /proc/net/can directory */
110278- can_dir = proc_mkdir("can", init_net.proc_net);
110279+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
110280
110281 if (!can_dir) {
110282 printk(KERN_INFO "can: failed to create /proc/net/can . "
110283diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
110284index a9f4ae4..ee19b92 100644
110285--- a/net/ceph/messenger.c
110286+++ b/net/ceph/messenger.c
110287@@ -188,7 +188,7 @@ static void con_fault(struct ceph_connection *con);
110288 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
110289
110290 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
110291-static atomic_t addr_str_seq = ATOMIC_INIT(0);
110292+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
110293
110294 static struct page *zero_page; /* used in certain error cases */
110295
110296@@ -199,7 +199,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
110297 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
110298 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
110299
110300- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
110301+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
110302 s = addr_str[i];
110303
110304 switch (ss->ss_family) {
110305diff --git a/net/compat.c b/net/compat.c
110306index f7bd286..76ea56a 100644
110307--- a/net/compat.c
110308+++ b/net/compat.c
110309@@ -100,20 +100,20 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
110310
110311 #define CMSG_COMPAT_FIRSTHDR(msg) \
110312 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
110313- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
110314+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
110315 (struct compat_cmsghdr __user *)NULL)
110316
110317 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
110318 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
110319 (ucmlen) <= (unsigned long) \
110320 ((mhdr)->msg_controllen - \
110321- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
110322+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
110323
110324 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
110325 struct compat_cmsghdr __user *cmsg, int cmsg_len)
110326 {
110327 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
110328- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
110329+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
110330 msg->msg_controllen)
110331 return NULL;
110332 return (struct compat_cmsghdr __user *)ptr;
110333@@ -203,7 +203,7 @@ Efault:
110334
110335 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
110336 {
110337- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
110338+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
110339 struct compat_cmsghdr cmhdr;
110340 struct compat_timeval ctv;
110341 struct compat_timespec cts[3];
110342@@ -259,7 +259,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
110343
110344 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
110345 {
110346- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
110347+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
110348 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
110349 int fdnum = scm->fp->count;
110350 struct file **fp = scm->fp->fp;
110351@@ -347,7 +347,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
110352 return -EFAULT;
110353 old_fs = get_fs();
110354 set_fs(KERNEL_DS);
110355- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
110356+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
110357 set_fs(old_fs);
110358
110359 return err;
110360@@ -408,7 +408,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
110361 len = sizeof(ktime);
110362 old_fs = get_fs();
110363 set_fs(KERNEL_DS);
110364- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
110365+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
110366 set_fs(old_fs);
110367
110368 if (!err) {
110369@@ -551,7 +551,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
110370 case MCAST_JOIN_GROUP:
110371 case MCAST_LEAVE_GROUP:
110372 {
110373- struct compat_group_req __user *gr32 = (void *)optval;
110374+ struct compat_group_req __user *gr32 = (void __user *)optval;
110375 struct group_req __user *kgr =
110376 compat_alloc_user_space(sizeof(struct group_req));
110377 u32 interface;
110378@@ -572,7 +572,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
110379 case MCAST_BLOCK_SOURCE:
110380 case MCAST_UNBLOCK_SOURCE:
110381 {
110382- struct compat_group_source_req __user *gsr32 = (void *)optval;
110383+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
110384 struct group_source_req __user *kgsr = compat_alloc_user_space(
110385 sizeof(struct group_source_req));
110386 u32 interface;
110387@@ -593,7 +593,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
110388 }
110389 case MCAST_MSFILTER:
110390 {
110391- struct compat_group_filter __user *gf32 = (void *)optval;
110392+ struct compat_group_filter __user *gf32 = (void __user *)optval;
110393 struct group_filter __user *kgf;
110394 u32 interface, fmode, numsrc;
110395
110396@@ -631,7 +631,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
110397 char __user *optval, int __user *optlen,
110398 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
110399 {
110400- struct compat_group_filter __user *gf32 = (void *)optval;
110401+ struct compat_group_filter __user *gf32 = (void __user *)optval;
110402 struct group_filter __user *kgf;
110403 int __user *koptlen;
110404 u32 interface, fmode, numsrc;
110405@@ -775,7 +775,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
110406
110407 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
110408 return -EINVAL;
110409- if (copy_from_user(a, args, nas[call]))
110410+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
110411 return -EFAULT;
110412 a0 = a[0];
110413 a1 = a[1];
110414diff --git a/net/core/datagram.c b/net/core/datagram.c
110415index df493d6..1145766 100644
110416--- a/net/core/datagram.c
110417+++ b/net/core/datagram.c
110418@@ -302,7 +302,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
110419 }
110420
110421 kfree_skb(skb);
110422- atomic_inc(&sk->sk_drops);
110423+ atomic_inc_unchecked(&sk->sk_drops);
110424 sk_mem_reclaim_partial(sk);
110425
110426 return err;
110427diff --git a/net/core/dev.c b/net/core/dev.c
110428index e977e15..74b19b0 100644
110429--- a/net/core/dev.c
110430+++ b/net/core/dev.c
110431@@ -1681,14 +1681,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
110432 {
110433 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
110434 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
110435- atomic_long_inc(&dev->rx_dropped);
110436+ atomic_long_inc_unchecked(&dev->rx_dropped);
110437 kfree_skb(skb);
110438 return NET_RX_DROP;
110439 }
110440 }
110441
110442 if (unlikely(!is_skb_forwardable(dev, skb))) {
110443- atomic_long_inc(&dev->rx_dropped);
110444+ atomic_long_inc_unchecked(&dev->rx_dropped);
110445 kfree_skb(skb);
110446 return NET_RX_DROP;
110447 }
110448@@ -2987,7 +2987,7 @@ recursion_alert:
110449 drop:
110450 rcu_read_unlock_bh();
110451
110452- atomic_long_inc(&dev->tx_dropped);
110453+ atomic_long_inc_unchecked(&dev->tx_dropped);
110454 kfree_skb_list(skb);
110455 return rc;
110456 out:
110457@@ -3336,7 +3336,7 @@ enqueue:
110458
110459 local_irq_restore(flags);
110460
110461- atomic_long_inc(&skb->dev->rx_dropped);
110462+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
110463 kfree_skb(skb);
110464 return NET_RX_DROP;
110465 }
110466@@ -3413,7 +3413,7 @@ int netif_rx_ni(struct sk_buff *skb)
110467 }
110468 EXPORT_SYMBOL(netif_rx_ni);
110469
110470-static void net_tx_action(struct softirq_action *h)
110471+static __latent_entropy void net_tx_action(void)
110472 {
110473 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
110474
110475@@ -3751,7 +3751,7 @@ ncls:
110476 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
110477 } else {
110478 drop:
110479- atomic_long_inc(&skb->dev->rx_dropped);
110480+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
110481 kfree_skb(skb);
110482 /* Jamal, now you will not able to escape explaining
110483 * me how you were going to use this. :-)
110484@@ -4640,7 +4640,7 @@ out_unlock:
110485 return work;
110486 }
110487
110488-static void net_rx_action(struct softirq_action *h)
110489+static __latent_entropy void net_rx_action(void)
110490 {
110491 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
110492 unsigned long time_limit = jiffies + 2;
110493@@ -6676,8 +6676,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
110494 } else {
110495 netdev_stats_to_stats64(storage, &dev->stats);
110496 }
110497- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
110498- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
110499+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
110500+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
110501 return storage;
110502 }
110503 EXPORT_SYMBOL(dev_get_stats);
110504diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
110505index b94b1d2..da3ed7c 100644
110506--- a/net/core/dev_ioctl.c
110507+++ b/net/core/dev_ioctl.c
110508@@ -368,8 +368,13 @@ void dev_load(struct net *net, const char *name)
110509 no_module = !dev;
110510 if (no_module && capable(CAP_NET_ADMIN))
110511 no_module = request_module("netdev-%s", name);
110512- if (no_module && capable(CAP_SYS_MODULE))
110513+ if (no_module && capable(CAP_SYS_MODULE)) {
110514+#ifdef CONFIG_GRKERNSEC_MODHARDEN
110515+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
110516+#else
110517 request_module("%s", name);
110518+#endif
110519+ }
110520 }
110521 EXPORT_SYMBOL(dev_load);
110522
110523diff --git a/net/core/filter.c b/net/core/filter.c
110524index f6bdc2b..76eba8e 100644
110525--- a/net/core/filter.c
110526+++ b/net/core/filter.c
110527@@ -533,7 +533,11 @@ do_pass:
110528
110529 /* Unknown instruction. */
110530 default:
110531- goto err;
110532+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u tf:%u k:%u\n",
110533+ fp->code, fp->jt, fp->jf, fp->k);
110534+ kfree(addrs);
110535+ BUG();
110536+ return -EINVAL;
110537 }
110538
110539 insn++;
110540@@ -577,7 +581,7 @@ static int check_load_and_stores(const struct sock_filter *filter, int flen)
110541 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
110542 int pc, ret = 0;
110543
110544- BUILD_BUG_ON(BPF_MEMWORDS > 16);
110545+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
110546
110547 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
110548 if (!masks)
110549@@ -992,7 +996,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
110550 if (!fp)
110551 return -ENOMEM;
110552
110553- memcpy(fp->insns, fprog->filter, fsize);
110554+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
110555
110556 fp->len = fprog->len;
110557 /* Since unattached filters are not copied back to user
110558diff --git a/net/core/flow.c b/net/core/flow.c
110559index 1033725..340f65d 100644
110560--- a/net/core/flow.c
110561+++ b/net/core/flow.c
110562@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
110563 static int flow_entry_valid(struct flow_cache_entry *fle,
110564 struct netns_xfrm *xfrm)
110565 {
110566- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
110567+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
110568 return 0;
110569 if (fle->object && !fle->object->ops->check(fle->object))
110570 return 0;
110571@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
110572 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
110573 fcp->hash_count++;
110574 }
110575- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
110576+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
110577 flo = fle->object;
110578 if (!flo)
110579 goto ret_object;
110580@@ -263,7 +263,7 @@ nocache:
110581 }
110582 flo = resolver(net, key, family, dir, flo, ctx);
110583 if (fle) {
110584- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
110585+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
110586 if (!IS_ERR(flo))
110587 fle->object = flo;
110588 else
110589diff --git a/net/core/neighbour.c b/net/core/neighbour.c
110590index d0e5d66..c55e69d 100644
110591--- a/net/core/neighbour.c
110592+++ b/net/core/neighbour.c
110593@@ -2819,7 +2819,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
110594 void __user *buffer, size_t *lenp, loff_t *ppos)
110595 {
110596 int size, ret;
110597- struct ctl_table tmp = *ctl;
110598+ ctl_table_no_const tmp = *ctl;
110599
110600 tmp.extra1 = &zero;
110601 tmp.extra2 = &unres_qlen_max;
110602@@ -2881,7 +2881,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
110603 void __user *buffer,
110604 size_t *lenp, loff_t *ppos)
110605 {
110606- struct ctl_table tmp = *ctl;
110607+ ctl_table_no_const tmp = *ctl;
110608 int ret;
110609
110610 tmp.extra1 = &zero;
110611diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
110612index 2bf8329..2eb1423 100644
110613--- a/net/core/net-procfs.c
110614+++ b/net/core/net-procfs.c
110615@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
110616 struct rtnl_link_stats64 temp;
110617 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
110618
110619- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
110620+ if (gr_proc_is_restricted())
110621+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
110622+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
110623+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
110624+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
110625+ else
110626+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
110627 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
110628 dev->name, stats->rx_bytes, stats->rx_packets,
110629 stats->rx_errors,
110630@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
110631 return 0;
110632 }
110633
110634-static const struct seq_operations dev_seq_ops = {
110635+const struct seq_operations dev_seq_ops = {
110636 .start = dev_seq_start,
110637 .next = dev_seq_next,
110638 .stop = dev_seq_stop,
110639@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
110640
110641 static int softnet_seq_open(struct inode *inode, struct file *file)
110642 {
110643- return seq_open(file, &softnet_seq_ops);
110644+ return seq_open_restrict(file, &softnet_seq_ops);
110645 }
110646
110647 static const struct file_operations softnet_seq_fops = {
110648@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
110649 else
110650 seq_printf(seq, "%04x", ntohs(pt->type));
110651
110652+#ifdef CONFIG_GRKERNSEC_HIDESYM
110653+ seq_printf(seq, " %-8s %pf\n",
110654+ pt->dev ? pt->dev->name : "", NULL);
110655+#else
110656 seq_printf(seq, " %-8s %pf\n",
110657 pt->dev ? pt->dev->name : "", pt->func);
110658+#endif
110659 }
110660
110661 return 0;
110662diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
110663index f2aa73b..0d1a1ea 100644
110664--- a/net/core/net-sysfs.c
110665+++ b/net/core/net-sysfs.c
110666@@ -279,7 +279,7 @@ static ssize_t carrier_changes_show(struct device *dev,
110667 {
110668 struct net_device *netdev = to_net_dev(dev);
110669 return sprintf(buf, fmt_dec,
110670- atomic_read(&netdev->carrier_changes));
110671+ atomic_read_unchecked(&netdev->carrier_changes));
110672 }
110673 static DEVICE_ATTR_RO(carrier_changes);
110674
110675diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
110676index 70d3450..eb7c528 100644
110677--- a/net/core/net_namespace.c
110678+++ b/net/core/net_namespace.c
110679@@ -663,7 +663,7 @@ static int __register_pernet_operations(struct list_head *list,
110680 int error;
110681 LIST_HEAD(net_exit_list);
110682
110683- list_add_tail(&ops->list, list);
110684+ pax_list_add_tail((struct list_head *)&ops->list, list);
110685 if (ops->init || (ops->id && ops->size)) {
110686 for_each_net(net) {
110687 error = ops_init(ops, net);
110688@@ -676,7 +676,7 @@ static int __register_pernet_operations(struct list_head *list,
110689
110690 out_undo:
110691 /* If I have an error cleanup all namespaces I initialized */
110692- list_del(&ops->list);
110693+ pax_list_del((struct list_head *)&ops->list);
110694 ops_exit_list(ops, &net_exit_list);
110695 ops_free_list(ops, &net_exit_list);
110696 return error;
110697@@ -687,7 +687,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
110698 struct net *net;
110699 LIST_HEAD(net_exit_list);
110700
110701- list_del(&ops->list);
110702+ pax_list_del((struct list_head *)&ops->list);
110703 for_each_net(net)
110704 list_add_tail(&net->exit_list, &net_exit_list);
110705 ops_exit_list(ops, &net_exit_list);
110706@@ -821,7 +821,7 @@ int register_pernet_device(struct pernet_operations *ops)
110707 mutex_lock(&net_mutex);
110708 error = register_pernet_operations(&pernet_list, ops);
110709 if (!error && (first_device == &pernet_list))
110710- first_device = &ops->list;
110711+ first_device = (struct list_head *)&ops->list;
110712 mutex_unlock(&net_mutex);
110713 return error;
110714 }
110715diff --git a/net/core/netpoll.c b/net/core/netpoll.c
110716index c126a87..10ad89d 100644
110717--- a/net/core/netpoll.c
110718+++ b/net/core/netpoll.c
110719@@ -377,7 +377,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
110720 struct udphdr *udph;
110721 struct iphdr *iph;
110722 struct ethhdr *eth;
110723- static atomic_t ip_ident;
110724+ static atomic_unchecked_t ip_ident;
110725 struct ipv6hdr *ip6h;
110726
110727 udp_len = len + sizeof(*udph);
110728@@ -448,7 +448,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
110729 put_unaligned(0x45, (unsigned char *)iph);
110730 iph->tos = 0;
110731 put_unaligned(htons(ip_len), &(iph->tot_len));
110732- iph->id = htons(atomic_inc_return(&ip_ident));
110733+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
110734 iph->frag_off = 0;
110735 iph->ttl = 64;
110736 iph->protocol = IPPROTO_UDP;
110737diff --git a/net/core/pktgen.c b/net/core/pktgen.c
110738index 508155b..fad080f 100644
110739--- a/net/core/pktgen.c
110740+++ b/net/core/pktgen.c
110741@@ -3755,7 +3755,7 @@ static int __net_init pg_net_init(struct net *net)
110742 pn->net = net;
110743 INIT_LIST_HEAD(&pn->pktgen_threads);
110744 pn->pktgen_exiting = false;
110745- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
110746+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
110747 if (!pn->proc_dir) {
110748 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
110749 return -ENODEV;
110750diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
110751index a2b90e1..7882f75 100644
110752--- a/net/core/rtnetlink.c
110753+++ b/net/core/rtnetlink.c
110754@@ -61,7 +61,7 @@ struct rtnl_link {
110755 rtnl_doit_func doit;
110756 rtnl_dumpit_func dumpit;
110757 rtnl_calcit_func calcit;
110758-};
110759+} __no_const;
110760
110761 static DEFINE_MUTEX(rtnl_mutex);
110762
110763@@ -307,10 +307,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
110764 * to use the ops for creating device. So do not
110765 * fill up dellink as well. That disables rtnl_dellink.
110766 */
110767- if (ops->setup && !ops->dellink)
110768- ops->dellink = unregister_netdevice_queue;
110769+ if (ops->setup && !ops->dellink) {
110770+ pax_open_kernel();
110771+ *(void **)&ops->dellink = unregister_netdevice_queue;
110772+ pax_close_kernel();
110773+ }
110774
110775- list_add_tail(&ops->list, &link_ops);
110776+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
110777 return 0;
110778 }
110779 EXPORT_SYMBOL_GPL(__rtnl_link_register);
110780@@ -357,7 +360,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
110781 for_each_net(net) {
110782 __rtnl_kill_links(net, ops);
110783 }
110784- list_del(&ops->list);
110785+ pax_list_del((struct list_head *)&ops->list);
110786 }
110787 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
110788
110789@@ -1047,7 +1050,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
110790 (dev->ifalias &&
110791 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
110792 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
110793- atomic_read(&dev->carrier_changes)))
110794+ atomic_read_unchecked(&dev->carrier_changes)))
110795 goto nla_put_failure;
110796
110797 if (1) {
110798diff --git a/net/core/scm.c b/net/core/scm.c
110799index 3b6899b..cf36238 100644
110800--- a/net/core/scm.c
110801+++ b/net/core/scm.c
110802@@ -209,7 +209,7 @@ EXPORT_SYMBOL(__scm_send);
110803 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
110804 {
110805 struct cmsghdr __user *cm
110806- = (__force struct cmsghdr __user *)msg->msg_control;
110807+ = (struct cmsghdr __force_user *)msg->msg_control;
110808 struct cmsghdr cmhdr;
110809 int cmlen = CMSG_LEN(len);
110810 int err;
110811@@ -232,7 +232,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
110812 err = -EFAULT;
110813 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
110814 goto out;
110815- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
110816+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
110817 goto out;
110818 cmlen = CMSG_SPACE(len);
110819 if (msg->msg_controllen < cmlen)
110820@@ -248,7 +248,7 @@ EXPORT_SYMBOL(put_cmsg);
110821 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
110822 {
110823 struct cmsghdr __user *cm
110824- = (__force struct cmsghdr __user*)msg->msg_control;
110825+ = (struct cmsghdr __force_user *)msg->msg_control;
110826
110827 int fdmax = 0;
110828 int fdnum = scm->fp->count;
110829@@ -268,7 +268,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
110830 if (fdnum < fdmax)
110831 fdmax = fdnum;
110832
110833- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
110834+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
110835 i++, cmfptr++)
110836 {
110837 struct socket *sock;
110838diff --git a/net/core/skbuff.c b/net/core/skbuff.c
110839index 1e3abb8..d751ebd 100644
110840--- a/net/core/skbuff.c
110841+++ b/net/core/skbuff.c
110842@@ -2139,7 +2139,7 @@ EXPORT_SYMBOL(__skb_checksum);
110843 __wsum skb_checksum(const struct sk_buff *skb, int offset,
110844 int len, __wsum csum)
110845 {
110846- const struct skb_checksum_ops ops = {
110847+ static const struct skb_checksum_ops ops = {
110848 .update = csum_partial_ext,
110849 .combine = csum_block_add_ext,
110850 };
110851@@ -3379,12 +3379,14 @@ void __init skb_init(void)
110852 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
110853 sizeof(struct sk_buff),
110854 0,
110855- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
110856+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
110857+ SLAB_NO_SANITIZE,
110858 NULL);
110859 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
110860 sizeof(struct sk_buff_fclones),
110861 0,
110862- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
110863+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
110864+ SLAB_NO_SANITIZE,
110865 NULL);
110866 }
110867
110868diff --git a/net/core/sock.c b/net/core/sock.c
110869index c77d5d2..c1d6a84 100644
110870--- a/net/core/sock.c
110871+++ b/net/core/sock.c
110872@@ -443,7 +443,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
110873 struct sk_buff_head *list = &sk->sk_receive_queue;
110874
110875 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
110876- atomic_inc(&sk->sk_drops);
110877+ atomic_inc_unchecked(&sk->sk_drops);
110878 trace_sock_rcvqueue_full(sk, skb);
110879 return -ENOMEM;
110880 }
110881@@ -453,7 +453,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
110882 return err;
110883
110884 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
110885- atomic_inc(&sk->sk_drops);
110886+ atomic_inc_unchecked(&sk->sk_drops);
110887 return -ENOBUFS;
110888 }
110889
110890@@ -466,7 +466,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
110891 skb_dst_force(skb);
110892
110893 spin_lock_irqsave(&list->lock, flags);
110894- skb->dropcount = atomic_read(&sk->sk_drops);
110895+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
110896 __skb_queue_tail(list, skb);
110897 spin_unlock_irqrestore(&list->lock, flags);
110898
110899@@ -486,7 +486,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
110900 skb->dev = NULL;
110901
110902 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
110903- atomic_inc(&sk->sk_drops);
110904+ atomic_inc_unchecked(&sk->sk_drops);
110905 goto discard_and_relse;
110906 }
110907 if (nested)
110908@@ -504,7 +504,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
110909 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
110910 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
110911 bh_unlock_sock(sk);
110912- atomic_inc(&sk->sk_drops);
110913+ atomic_inc_unchecked(&sk->sk_drops);
110914 goto discard_and_relse;
110915 }
110916
110917@@ -910,6 +910,7 @@ set_rcvbuf:
110918 }
110919 break;
110920
110921+#ifndef GRKERNSEC_BPF_HARDEN
110922 case SO_ATTACH_BPF:
110923 ret = -EINVAL;
110924 if (optlen == sizeof(u32)) {
110925@@ -922,7 +923,7 @@ set_rcvbuf:
110926 ret = sk_attach_bpf(ufd, sk);
110927 }
110928 break;
110929-
110930+#endif
110931 case SO_DETACH_FILTER:
110932 ret = sk_detach_filter(sk);
110933 break;
110934@@ -1026,12 +1027,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
110935 struct timeval tm;
110936 } v;
110937
110938- int lv = sizeof(int);
110939- int len;
110940+ unsigned int lv = sizeof(int);
110941+ unsigned int len;
110942
110943 if (get_user(len, optlen))
110944 return -EFAULT;
110945- if (len < 0)
110946+ if (len > INT_MAX)
110947 return -EINVAL;
110948
110949 memset(&v, 0, sizeof(v));
110950@@ -1169,11 +1170,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
110951
110952 case SO_PEERNAME:
110953 {
110954- char address[128];
110955+ char address[_K_SS_MAXSIZE];
110956
110957 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
110958 return -ENOTCONN;
110959- if (lv < len)
110960+ if (lv < len || sizeof address < len)
110961 return -EINVAL;
110962 if (copy_to_user(optval, address, len))
110963 return -EFAULT;
110964@@ -1258,7 +1259,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
110965
110966 if (len > lv)
110967 len = lv;
110968- if (copy_to_user(optval, &v, len))
110969+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
110970 return -EFAULT;
110971 lenout:
110972 if (put_user(len, optlen))
110973@@ -2375,7 +2376,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
110974 */
110975 smp_wmb();
110976 atomic_set(&sk->sk_refcnt, 1);
110977- atomic_set(&sk->sk_drops, 0);
110978+ atomic_set_unchecked(&sk->sk_drops, 0);
110979 }
110980 EXPORT_SYMBOL(sock_init_data);
110981
110982@@ -2503,6 +2504,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
110983 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
110984 int level, int type)
110985 {
110986+ struct sock_extended_err ee;
110987 struct sock_exterr_skb *serr;
110988 struct sk_buff *skb;
110989 int copied, err;
110990@@ -2524,7 +2526,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
110991 sock_recv_timestamp(msg, sk, skb);
110992
110993 serr = SKB_EXT_ERR(skb);
110994- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
110995+ ee = serr->ee;
110996+ put_cmsg(msg, level, type, sizeof ee, &ee);
110997
110998 msg->msg_flags |= MSG_ERRQUEUE;
110999 err = copied;
111000diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
111001index ad704c7..ca48aff 100644
111002--- a/net/core/sock_diag.c
111003+++ b/net/core/sock_diag.c
111004@@ -9,26 +9,33 @@
111005 #include <linux/inet_diag.h>
111006 #include <linux/sock_diag.h>
111007
111008-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
111009+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
111010 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
111011 static DEFINE_MUTEX(sock_diag_table_mutex);
111012
111013 int sock_diag_check_cookie(void *sk, __u32 *cookie)
111014 {
111015+#ifndef CONFIG_GRKERNSEC_HIDESYM
111016 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
111017 cookie[1] != INET_DIAG_NOCOOKIE) &&
111018 ((u32)(unsigned long)sk != cookie[0] ||
111019 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
111020 return -ESTALE;
111021 else
111022+#endif
111023 return 0;
111024 }
111025 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
111026
111027 void sock_diag_save_cookie(void *sk, __u32 *cookie)
111028 {
111029+#ifdef CONFIG_GRKERNSEC_HIDESYM
111030+ cookie[0] = 0;
111031+ cookie[1] = 0;
111032+#else
111033 cookie[0] = (u32)(unsigned long)sk;
111034 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
111035+#endif
111036 }
111037 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
111038
111039@@ -110,8 +117,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
111040 mutex_lock(&sock_diag_table_mutex);
111041 if (sock_diag_handlers[hndl->family])
111042 err = -EBUSY;
111043- else
111044+ else {
111045+ pax_open_kernel();
111046 sock_diag_handlers[hndl->family] = hndl;
111047+ pax_close_kernel();
111048+ }
111049 mutex_unlock(&sock_diag_table_mutex);
111050
111051 return err;
111052@@ -127,7 +137,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
111053
111054 mutex_lock(&sock_diag_table_mutex);
111055 BUG_ON(sock_diag_handlers[family] != hnld);
111056+ pax_open_kernel();
111057 sock_diag_handlers[family] = NULL;
111058+ pax_close_kernel();
111059 mutex_unlock(&sock_diag_table_mutex);
111060 }
111061 EXPORT_SYMBOL_GPL(sock_diag_unregister);
111062diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
111063index 8ce351f..2c388f7 100644
111064--- a/net/core/sysctl_net_core.c
111065+++ b/net/core/sysctl_net_core.c
111066@@ -36,7 +36,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
111067 {
111068 unsigned int orig_size, size;
111069 int ret, i;
111070- struct ctl_table tmp = {
111071+ ctl_table_no_const tmp = {
111072 .data = &size,
111073 .maxlen = sizeof(size),
111074 .mode = table->mode
111075@@ -204,7 +204,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
111076 void __user *buffer, size_t *lenp, loff_t *ppos)
111077 {
111078 char id[IFNAMSIZ];
111079- struct ctl_table tbl = {
111080+ ctl_table_no_const tbl = {
111081 .data = id,
111082 .maxlen = IFNAMSIZ,
111083 };
111084@@ -222,7 +222,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
111085 static int proc_do_rss_key(struct ctl_table *table, int write,
111086 void __user *buffer, size_t *lenp, loff_t *ppos)
111087 {
111088- struct ctl_table fake_table;
111089+ ctl_table_no_const fake_table;
111090 char buf[NETDEV_RSS_KEY_LEN * 3];
111091
111092 snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
111093@@ -286,7 +286,7 @@ static struct ctl_table net_core_table[] = {
111094 .mode = 0444,
111095 .proc_handler = proc_do_rss_key,
111096 },
111097-#ifdef CONFIG_BPF_JIT
111098+#if defined(CONFIG_BPF_JIT) && !defined(CONFIG_GRKERNSEC_BPF_HARDEN)
111099 {
111100 .procname = "bpf_jit_enable",
111101 .data = &bpf_jit_enable,
111102@@ -411,13 +411,12 @@ static struct ctl_table netns_core_table[] = {
111103
111104 static __net_init int sysctl_core_net_init(struct net *net)
111105 {
111106- struct ctl_table *tbl;
111107+ ctl_table_no_const *tbl = NULL;
111108
111109 net->core.sysctl_somaxconn = SOMAXCONN;
111110
111111- tbl = netns_core_table;
111112 if (!net_eq(net, &init_net)) {
111113- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
111114+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
111115 if (tbl == NULL)
111116 goto err_dup;
111117
111118@@ -427,17 +426,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
111119 if (net->user_ns != &init_user_ns) {
111120 tbl[0].procname = NULL;
111121 }
111122- }
111123-
111124- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
111125+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
111126+ } else
111127+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
111128 if (net->core.sysctl_hdr == NULL)
111129 goto err_reg;
111130
111131 return 0;
111132
111133 err_reg:
111134- if (tbl != netns_core_table)
111135- kfree(tbl);
111136+ kfree(tbl);
111137 err_dup:
111138 return -ENOMEM;
111139 }
111140@@ -452,7 +450,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
111141 kfree(tbl);
111142 }
111143
111144-static __net_initdata struct pernet_operations sysctl_core_ops = {
111145+static __net_initconst struct pernet_operations sysctl_core_ops = {
111146 .init = sysctl_core_net_init,
111147 .exit = sysctl_core_net_exit,
111148 };
111149diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
111150index 8102286..a0c2755 100644
111151--- a/net/decnet/af_decnet.c
111152+++ b/net/decnet/af_decnet.c
111153@@ -466,6 +466,7 @@ static struct proto dn_proto = {
111154 .sysctl_rmem = sysctl_decnet_rmem,
111155 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
111156 .obj_size = sizeof(struct dn_sock),
111157+ .slab_flags = SLAB_USERCOPY,
111158 };
111159
111160 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
111161diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
111162index b2c26b0..41f803e 100644
111163--- a/net/decnet/dn_dev.c
111164+++ b/net/decnet/dn_dev.c
111165@@ -201,7 +201,7 @@ static struct dn_dev_sysctl_table {
111166 .extra1 = &min_t3,
111167 .extra2 = &max_t3
111168 },
111169- {0}
111170+ { }
111171 },
111172 };
111173
111174diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
111175index 5325b54..a0d4d69 100644
111176--- a/net/decnet/sysctl_net_decnet.c
111177+++ b/net/decnet/sysctl_net_decnet.c
111178@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
111179
111180 if (len > *lenp) len = *lenp;
111181
111182- if (copy_to_user(buffer, addr, len))
111183+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
111184 return -EFAULT;
111185
111186 *lenp = len;
111187@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
111188
111189 if (len > *lenp) len = *lenp;
111190
111191- if (copy_to_user(buffer, devname, len))
111192+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
111193 return -EFAULT;
111194
111195 *lenp = len;
111196diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
111197index a2c7e4c..3dc9f67 100644
111198--- a/net/hsr/hsr_netlink.c
111199+++ b/net/hsr/hsr_netlink.c
111200@@ -102,7 +102,7 @@ nla_put_failure:
111201 return -EMSGSIZE;
111202 }
111203
111204-static struct rtnl_link_ops hsr_link_ops __read_mostly = {
111205+static struct rtnl_link_ops hsr_link_ops = {
111206 .kind = "hsr",
111207 .maxtype = IFLA_HSR_MAX,
111208 .policy = hsr_policy,
111209diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
111210index 055fbb7..c0dbe60 100644
111211--- a/net/ieee802154/6lowpan/core.c
111212+++ b/net/ieee802154/6lowpan/core.c
111213@@ -217,7 +217,7 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
111214 dev_put(real_dev);
111215 }
111216
111217-static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
111218+static struct rtnl_link_ops lowpan_link_ops = {
111219 .kind = "lowpan",
111220 .priv_size = sizeof(struct lowpan_dev_info),
111221 .setup = lowpan_setup,
111222diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
111223index f46e4d1..30231f1 100644
111224--- a/net/ieee802154/6lowpan/reassembly.c
111225+++ b/net/ieee802154/6lowpan/reassembly.c
111226@@ -435,14 +435,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
111227
111228 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
111229 {
111230- struct ctl_table *table;
111231+ ctl_table_no_const *table = NULL;
111232 struct ctl_table_header *hdr;
111233 struct netns_ieee802154_lowpan *ieee802154_lowpan =
111234 net_ieee802154_lowpan(net);
111235
111236- table = lowpan_frags_ns_ctl_table;
111237 if (!net_eq(net, &init_net)) {
111238- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
111239+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
111240 GFP_KERNEL);
111241 if (table == NULL)
111242 goto err_alloc;
111243@@ -457,9 +456,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
111244 /* Don't export sysctls to unprivileged users */
111245 if (net->user_ns != &init_user_ns)
111246 table[0].procname = NULL;
111247- }
111248-
111249- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
111250+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
111251+ } else
111252+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
111253 if (hdr == NULL)
111254 goto err_reg;
111255
111256@@ -467,8 +466,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
111257 return 0;
111258
111259 err_reg:
111260- if (!net_eq(net, &init_net))
111261- kfree(table);
111262+ kfree(table);
111263 err_alloc:
111264 return -ENOMEM;
111265 }
111266diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
111267index 61edc49..99991a4 100644
111268--- a/net/ipv4/af_inet.c
111269+++ b/net/ipv4/af_inet.c
111270@@ -1392,7 +1392,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
111271 return ip_recv_error(sk, msg, len, addr_len);
111272 #if IS_ENABLED(CONFIG_IPV6)
111273 if (sk->sk_family == AF_INET6)
111274- return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
111275+ return pingv6_ops->ipv6_recv_error(sk, msg, len, addr_len);
111276 #endif
111277 return -EINVAL;
111278 }
111279diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
111280index 3a8985c..9d2a870 100644
111281--- a/net/ipv4/devinet.c
111282+++ b/net/ipv4/devinet.c
111283@@ -69,7 +69,8 @@
111284
111285 static struct ipv4_devconf ipv4_devconf = {
111286 .data = {
111287- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
111288+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
111289+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
111290 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
111291 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
111292 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
111293@@ -80,7 +81,8 @@ static struct ipv4_devconf ipv4_devconf = {
111294
111295 static struct ipv4_devconf ipv4_devconf_dflt = {
111296 .data = {
111297- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
111298+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
111299+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
111300 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
111301 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
111302 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
111303@@ -1549,7 +1551,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
111304 idx = 0;
111305 head = &net->dev_index_head[h];
111306 rcu_read_lock();
111307- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
111308+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
111309 net->dev_base_seq;
111310 hlist_for_each_entry_rcu(dev, head, index_hlist) {
111311 if (idx < s_idx)
111312@@ -1868,7 +1870,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
111313 idx = 0;
111314 head = &net->dev_index_head[h];
111315 rcu_read_lock();
111316- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
111317+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
111318 net->dev_base_seq;
111319 hlist_for_each_entry_rcu(dev, head, index_hlist) {
111320 if (idx < s_idx)
111321@@ -2103,7 +2105,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
111322 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
111323 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
111324
111325-static struct devinet_sysctl_table {
111326+static const struct devinet_sysctl_table {
111327 struct ctl_table_header *sysctl_header;
111328 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
111329 } devinet_sysctl = {
111330@@ -2235,7 +2237,7 @@ static __net_init int devinet_init_net(struct net *net)
111331 int err;
111332 struct ipv4_devconf *all, *dflt;
111333 #ifdef CONFIG_SYSCTL
111334- struct ctl_table *tbl = ctl_forward_entry;
111335+ ctl_table_no_const *tbl = NULL;
111336 struct ctl_table_header *forw_hdr;
111337 #endif
111338
111339@@ -2253,7 +2255,7 @@ static __net_init int devinet_init_net(struct net *net)
111340 goto err_alloc_dflt;
111341
111342 #ifdef CONFIG_SYSCTL
111343- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
111344+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
111345 if (tbl == NULL)
111346 goto err_alloc_ctl;
111347
111348@@ -2273,7 +2275,10 @@ static __net_init int devinet_init_net(struct net *net)
111349 goto err_reg_dflt;
111350
111351 err = -ENOMEM;
111352- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
111353+ if (!net_eq(net, &init_net))
111354+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
111355+ else
111356+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
111357 if (forw_hdr == NULL)
111358 goto err_reg_ctl;
111359 net->ipv4.forw_hdr = forw_hdr;
111360@@ -2289,8 +2294,7 @@ err_reg_ctl:
111361 err_reg_dflt:
111362 __devinet_sysctl_unregister(all);
111363 err_reg_all:
111364- if (tbl != ctl_forward_entry)
111365- kfree(tbl);
111366+ kfree(tbl);
111367 err_alloc_ctl:
111368 #endif
111369 if (dflt != &ipv4_devconf_dflt)
111370diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
111371index 23b9b3e..60cf0c4 100644
111372--- a/net/ipv4/fib_frontend.c
111373+++ b/net/ipv4/fib_frontend.c
111374@@ -1020,12 +1020,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
111375 #ifdef CONFIG_IP_ROUTE_MULTIPATH
111376 fib_sync_up(dev);
111377 #endif
111378- atomic_inc(&net->ipv4.dev_addr_genid);
111379+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
111380 rt_cache_flush(dev_net(dev));
111381 break;
111382 case NETDEV_DOWN:
111383 fib_del_ifaddr(ifa, NULL);
111384- atomic_inc(&net->ipv4.dev_addr_genid);
111385+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
111386 if (ifa->ifa_dev->ifa_list == NULL) {
111387 /* Last address was deleted from this interface.
111388 * Disable IP.
111389@@ -1063,7 +1063,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
111390 #ifdef CONFIG_IP_ROUTE_MULTIPATH
111391 fib_sync_up(dev);
111392 #endif
111393- atomic_inc(&net->ipv4.dev_addr_genid);
111394+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
111395 rt_cache_flush(net);
111396 break;
111397 case NETDEV_DOWN:
111398diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
111399index 1e2090e..351a724 100644
111400--- a/net/ipv4/fib_semantics.c
111401+++ b/net/ipv4/fib_semantics.c
111402@@ -753,7 +753,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
111403 nh->nh_saddr = inet_select_addr(nh->nh_dev,
111404 nh->nh_gw,
111405 nh->nh_parent->fib_scope);
111406- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
111407+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
111408
111409 return nh->nh_saddr;
111410 }
111411diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
111412index ff069f6..335e752 100644
111413--- a/net/ipv4/fou.c
111414+++ b/net/ipv4/fou.c
111415@@ -771,12 +771,12 @@ EXPORT_SYMBOL(gue_build_header);
111416
111417 #ifdef CONFIG_NET_FOU_IP_TUNNELS
111418
111419-static const struct ip_tunnel_encap_ops __read_mostly fou_iptun_ops = {
111420+static const struct ip_tunnel_encap_ops fou_iptun_ops = {
111421 .encap_hlen = fou_encap_hlen,
111422 .build_header = fou_build_header,
111423 };
111424
111425-static const struct ip_tunnel_encap_ops __read_mostly gue_iptun_ops = {
111426+static const struct ip_tunnel_encap_ops gue_iptun_ops = {
111427 .encap_hlen = gue_encap_hlen,
111428 .build_header = gue_build_header,
111429 };
111430diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
111431index 9111a4e..3576905 100644
111432--- a/net/ipv4/inet_hashtables.c
111433+++ b/net/ipv4/inet_hashtables.c
111434@@ -18,6 +18,7 @@
111435 #include <linux/sched.h>
111436 #include <linux/slab.h>
111437 #include <linux/wait.h>
111438+#include <linux/security.h>
111439
111440 #include <net/inet_connection_sock.h>
111441 #include <net/inet_hashtables.h>
111442@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
111443 return inet_ehashfn(net, laddr, lport, faddr, fport);
111444 }
111445
111446+extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
111447+
111448 /*
111449 * Allocate and initialize a new local port bind bucket.
111450 * The bindhash mutex for snum's hash chain must be held here.
111451@@ -554,6 +557,8 @@ ok:
111452 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
111453 spin_unlock(&head->lock);
111454
111455+ gr_update_task_in_ip_table(inet_sk(sk));
111456+
111457 if (tw) {
111458 inet_twsk_deschedule(tw, death_row);
111459 while (twrefcnt) {
111460diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
111461index 241afd7..31b95d5 100644
111462--- a/net/ipv4/inetpeer.c
111463+++ b/net/ipv4/inetpeer.c
111464@@ -461,7 +461,7 @@ relookup:
111465 if (p) {
111466 p->daddr = *daddr;
111467 atomic_set(&p->refcnt, 1);
111468- atomic_set(&p->rid, 0);
111469+ atomic_set_unchecked(&p->rid, 0);
111470 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
111471 p->rate_tokens = 0;
111472 /* 60*HZ is arbitrary, but chosen enough high so that the first
111473diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
111474index 145a50c..5dd8cc5 100644
111475--- a/net/ipv4/ip_fragment.c
111476+++ b/net/ipv4/ip_fragment.c
111477@@ -268,7 +268,7 @@ static int ip_frag_too_far(struct ipq *qp)
111478 return 0;
111479
111480 start = qp->rid;
111481- end = atomic_inc_return(&peer->rid);
111482+ end = atomic_inc_return_unchecked(&peer->rid);
111483 qp->rid = end;
111484
111485 rc = qp->q.fragments && (end - start) > max;
111486@@ -748,12 +748,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
111487
111488 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
111489 {
111490- struct ctl_table *table;
111491+ ctl_table_no_const *table = NULL;
111492 struct ctl_table_header *hdr;
111493
111494- table = ip4_frags_ns_ctl_table;
111495 if (!net_eq(net, &init_net)) {
111496- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
111497+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
111498 if (table == NULL)
111499 goto err_alloc;
111500
111501@@ -767,9 +766,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
111502 /* Don't export sysctls to unprivileged users */
111503 if (net->user_ns != &init_user_ns)
111504 table[0].procname = NULL;
111505- }
111506+ hdr = register_net_sysctl(net, "net/ipv4", table);
111507+ } else
111508+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
111509
111510- hdr = register_net_sysctl(net, "net/ipv4", table);
111511 if (hdr == NULL)
111512 goto err_reg;
111513
111514@@ -777,8 +777,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
111515 return 0;
111516
111517 err_reg:
111518- if (!net_eq(net, &init_net))
111519- kfree(table);
111520+ kfree(table);
111521 err_alloc:
111522 return -ENOMEM;
111523 }
111524diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
111525index 6207275f..00323a2 100644
111526--- a/net/ipv4/ip_gre.c
111527+++ b/net/ipv4/ip_gre.c
111528@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
111529 module_param(log_ecn_error, bool, 0644);
111530 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
111531
111532-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
111533+static struct rtnl_link_ops ipgre_link_ops;
111534 static int ipgre_tunnel_init(struct net_device *dev);
111535
111536 static int ipgre_net_id __read_mostly;
111537@@ -817,7 +817,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
111538 [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
111539 };
111540
111541-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
111542+static struct rtnl_link_ops ipgre_link_ops = {
111543 .kind = "gre",
111544 .maxtype = IFLA_GRE_MAX,
111545 .policy = ipgre_policy,
111546@@ -832,7 +832,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
111547 .get_link_net = ip_tunnel_get_link_net,
111548 };
111549
111550-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
111551+static struct rtnl_link_ops ipgre_tap_ops = {
111552 .kind = "gretap",
111553 .maxtype = IFLA_GRE_MAX,
111554 .policy = ipgre_policy,
111555diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
111556index 3d4da2c..40f9c29 100644
111557--- a/net/ipv4/ip_input.c
111558+++ b/net/ipv4/ip_input.c
111559@@ -147,6 +147,10 @@
111560 #include <linux/mroute.h>
111561 #include <linux/netlink.h>
111562
111563+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
111564+extern int grsec_enable_blackhole;
111565+#endif
111566+
111567 /*
111568 * Process Router Attention IP option (RFC 2113)
111569 */
111570@@ -223,6 +227,9 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
111571 if (!raw) {
111572 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
111573 IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
111574+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
111575+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
111576+#endif
111577 icmp_send(skb, ICMP_DEST_UNREACH,
111578 ICMP_PROT_UNREACH, 0);
111579 }
111580diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
111581index d9e8ff3..a70a150 100644
111582--- a/net/ipv4/ip_sockglue.c
111583+++ b/net/ipv4/ip_sockglue.c
111584@@ -1263,7 +1263,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
111585 len = min_t(unsigned int, len, opt->optlen);
111586 if (put_user(len, optlen))
111587 return -EFAULT;
111588- if (copy_to_user(optval, opt->__data, len))
111589+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
111590+ copy_to_user(optval, opt->__data, len))
111591 return -EFAULT;
111592 return 0;
111593 }
111594@@ -1397,7 +1398,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
111595 if (sk->sk_type != SOCK_STREAM)
111596 return -ENOPROTOOPT;
111597
111598- msg.msg_control = (__force void *) optval;
111599+ msg.msg_control = (__force_kernel void *) optval;
111600 msg.msg_controllen = len;
111601 msg.msg_flags = flags;
111602
111603diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
111604index 94efe14..1453fcc 100644
111605--- a/net/ipv4/ip_vti.c
111606+++ b/net/ipv4/ip_vti.c
111607@@ -45,7 +45,7 @@
111608 #include <net/net_namespace.h>
111609 #include <net/netns/generic.h>
111610
111611-static struct rtnl_link_ops vti_link_ops __read_mostly;
111612+static struct rtnl_link_ops vti_link_ops;
111613
111614 static int vti_net_id __read_mostly;
111615 static int vti_tunnel_init(struct net_device *dev);
111616@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
111617 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
111618 };
111619
111620-static struct rtnl_link_ops vti_link_ops __read_mostly = {
111621+static struct rtnl_link_ops vti_link_ops = {
111622 .kind = "vti",
111623 .maxtype = IFLA_VTI_MAX,
111624 .policy = vti_policy,
111625diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
111626index b26376e..fc3d733 100644
111627--- a/net/ipv4/ipconfig.c
111628+++ b/net/ipv4/ipconfig.c
111629@@ -333,7 +333,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
111630
111631 mm_segment_t oldfs = get_fs();
111632 set_fs(get_ds());
111633- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
111634+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
111635 set_fs(oldfs);
111636 return res;
111637 }
111638@@ -344,7 +344,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
111639
111640 mm_segment_t oldfs = get_fs();
111641 set_fs(get_ds());
111642- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
111643+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
111644 set_fs(oldfs);
111645 return res;
111646 }
111647@@ -355,7 +355,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
111648
111649 mm_segment_t oldfs = get_fs();
111650 set_fs(get_ds());
111651- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
111652+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
111653 set_fs(oldfs);
111654 return res;
111655 }
111656diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
111657index 915d215..48d1db7 100644
111658--- a/net/ipv4/ipip.c
111659+++ b/net/ipv4/ipip.c
111660@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
111661 static int ipip_net_id __read_mostly;
111662
111663 static int ipip_tunnel_init(struct net_device *dev);
111664-static struct rtnl_link_ops ipip_link_ops __read_mostly;
111665+static struct rtnl_link_ops ipip_link_ops;
111666
111667 static int ipip_err(struct sk_buff *skb, u32 info)
111668 {
111669@@ -487,7 +487,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
111670 [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
111671 };
111672
111673-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
111674+static struct rtnl_link_ops ipip_link_ops = {
111675 .kind = "ipip",
111676 .maxtype = IFLA_IPTUN_MAX,
111677 .policy = ipip_policy,
111678diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
111679index f95b6f9..2ee2097 100644
111680--- a/net/ipv4/netfilter/arp_tables.c
111681+++ b/net/ipv4/netfilter/arp_tables.c
111682@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
111683 #endif
111684
111685 static int get_info(struct net *net, void __user *user,
111686- const int *len, int compat)
111687+ int len, int compat)
111688 {
111689 char name[XT_TABLE_MAXNAMELEN];
111690 struct xt_table *t;
111691 int ret;
111692
111693- if (*len != sizeof(struct arpt_getinfo)) {
111694- duprintf("length %u != %Zu\n", *len,
111695+ if (len != sizeof(struct arpt_getinfo)) {
111696+ duprintf("length %u != %Zu\n", len,
111697 sizeof(struct arpt_getinfo));
111698 return -EINVAL;
111699 }
111700@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
111701 info.size = private->size;
111702 strcpy(info.name, name);
111703
111704- if (copy_to_user(user, &info, *len) != 0)
111705+ if (copy_to_user(user, &info, len) != 0)
111706 ret = -EFAULT;
111707 else
111708 ret = 0;
111709@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
111710
111711 switch (cmd) {
111712 case ARPT_SO_GET_INFO:
111713- ret = get_info(sock_net(sk), user, len, 1);
111714+ ret = get_info(sock_net(sk), user, *len, 1);
111715 break;
111716 case ARPT_SO_GET_ENTRIES:
111717 ret = compat_get_entries(sock_net(sk), user, len);
111718@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
111719
111720 switch (cmd) {
111721 case ARPT_SO_GET_INFO:
111722- ret = get_info(sock_net(sk), user, len, 0);
111723+ ret = get_info(sock_net(sk), user, *len, 0);
111724 break;
111725
111726 case ARPT_SO_GET_ENTRIES:
111727diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
111728index cf5e82f..75a20f5 100644
111729--- a/net/ipv4/netfilter/ip_tables.c
111730+++ b/net/ipv4/netfilter/ip_tables.c
111731@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
111732 #endif
111733
111734 static int get_info(struct net *net, void __user *user,
111735- const int *len, int compat)
111736+ int len, int compat)
111737 {
111738 char name[XT_TABLE_MAXNAMELEN];
111739 struct xt_table *t;
111740 int ret;
111741
111742- if (*len != sizeof(struct ipt_getinfo)) {
111743- duprintf("length %u != %zu\n", *len,
111744+ if (len != sizeof(struct ipt_getinfo)) {
111745+ duprintf("length %u != %zu\n", len,
111746 sizeof(struct ipt_getinfo));
111747 return -EINVAL;
111748 }
111749@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
111750 info.size = private->size;
111751 strcpy(info.name, name);
111752
111753- if (copy_to_user(user, &info, *len) != 0)
111754+ if (copy_to_user(user, &info, len) != 0)
111755 ret = -EFAULT;
111756 else
111757 ret = 0;
111758@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
111759
111760 switch (cmd) {
111761 case IPT_SO_GET_INFO:
111762- ret = get_info(sock_net(sk), user, len, 1);
111763+ ret = get_info(sock_net(sk), user, *len, 1);
111764 break;
111765 case IPT_SO_GET_ENTRIES:
111766 ret = compat_get_entries(sock_net(sk), user, len);
111767@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
111768
111769 switch (cmd) {
111770 case IPT_SO_GET_INFO:
111771- ret = get_info(sock_net(sk), user, len, 0);
111772+ ret = get_info(sock_net(sk), user, *len, 0);
111773 break;
111774
111775 case IPT_SO_GET_ENTRIES:
111776diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
111777index e90f83a..3e6acca 100644
111778--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
111779+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
111780@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
111781 spin_lock_init(&cn->lock);
111782
111783 #ifdef CONFIG_PROC_FS
111784- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
111785+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
111786 if (!cn->procdir) {
111787 pr_err("Unable to proc dir entry\n");
111788 return -ENOMEM;
111789diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
111790index 787b0d6..ab6c0ba 100644
111791--- a/net/ipv4/ping.c
111792+++ b/net/ipv4/ping.c
111793@@ -59,7 +59,7 @@ struct ping_table {
111794 };
111795
111796 static struct ping_table ping_table;
111797-struct pingv6_ops pingv6_ops;
111798+struct pingv6_ops *pingv6_ops;
111799 EXPORT_SYMBOL_GPL(pingv6_ops);
111800
111801 static u16 ping_port_rover;
111802@@ -359,7 +359,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
111803 return -ENODEV;
111804 }
111805 }
111806- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
111807+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
111808 scoped);
111809 rcu_read_unlock();
111810
111811@@ -567,7 +567,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
111812 }
111813 #if IS_ENABLED(CONFIG_IPV6)
111814 } else if (skb->protocol == htons(ETH_P_IPV6)) {
111815- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
111816+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
111817 #endif
111818 }
111819
111820@@ -585,7 +585,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
111821 info, (u8 *)icmph);
111822 #if IS_ENABLED(CONFIG_IPV6)
111823 } else if (family == AF_INET6) {
111824- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
111825+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
111826 info, (u8 *)icmph);
111827 #endif
111828 }
111829@@ -919,10 +919,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
111830 }
111831
111832 if (inet6_sk(sk)->rxopt.all)
111833- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
111834+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
111835 if (skb->protocol == htons(ETH_P_IPV6) &&
111836 inet6_sk(sk)->rxopt.all)
111837- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
111838+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
111839 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
111840 ip_cmsg_recv(msg, skb);
111841 #endif
111842@@ -1117,7 +1117,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
111843 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
111844 0, sock_i_ino(sp),
111845 atomic_read(&sp->sk_refcnt), sp,
111846- atomic_read(&sp->sk_drops));
111847+ atomic_read_unchecked(&sp->sk_drops));
111848 }
111849
111850 static int ping_v4_seq_show(struct seq_file *seq, void *v)
111851diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
111852index f027a70..2e64edc 100644
111853--- a/net/ipv4/raw.c
111854+++ b/net/ipv4/raw.c
111855@@ -324,7 +324,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
111856 int raw_rcv(struct sock *sk, struct sk_buff *skb)
111857 {
111858 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
111859- atomic_inc(&sk->sk_drops);
111860+ atomic_inc_unchecked(&sk->sk_drops);
111861 kfree_skb(skb);
111862 return NET_RX_DROP;
111863 }
111864@@ -773,16 +773,20 @@ static int raw_init(struct sock *sk)
111865
111866 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
111867 {
111868+ struct icmp_filter filter;
111869+
111870 if (optlen > sizeof(struct icmp_filter))
111871 optlen = sizeof(struct icmp_filter);
111872- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
111873+ if (copy_from_user(&filter, optval, optlen))
111874 return -EFAULT;
111875+ raw_sk(sk)->filter = filter;
111876 return 0;
111877 }
111878
111879 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
111880 {
111881 int len, ret = -EFAULT;
111882+ struct icmp_filter filter;
111883
111884 if (get_user(len, optlen))
111885 goto out;
111886@@ -792,8 +796,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
111887 if (len > sizeof(struct icmp_filter))
111888 len = sizeof(struct icmp_filter);
111889 ret = -EFAULT;
111890- if (put_user(len, optlen) ||
111891- copy_to_user(optval, &raw_sk(sk)->filter, len))
111892+ filter = raw_sk(sk)->filter;
111893+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
111894 goto out;
111895 ret = 0;
111896 out: return ret;
111897@@ -1022,7 +1026,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
111898 0, 0L, 0,
111899 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
111900 0, sock_i_ino(sp),
111901- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
111902+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
111903 }
111904
111905 static int raw_seq_show(struct seq_file *seq, void *v)
111906diff --git a/net/ipv4/route.c b/net/ipv4/route.c
111907index e262a08..d1fc3be 100644
111908--- a/net/ipv4/route.c
111909+++ b/net/ipv4/route.c
111910@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
111911
111912 static int rt_cache_seq_open(struct inode *inode, struct file *file)
111913 {
111914- return seq_open(file, &rt_cache_seq_ops);
111915+ return seq_open_restrict(file, &rt_cache_seq_ops);
111916 }
111917
111918 static const struct file_operations rt_cache_seq_fops = {
111919@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
111920
111921 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
111922 {
111923- return seq_open(file, &rt_cpu_seq_ops);
111924+ return seq_open_restrict(file, &rt_cpu_seq_ops);
111925 }
111926
111927 static const struct file_operations rt_cpu_seq_fops = {
111928@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
111929
111930 static int rt_acct_proc_open(struct inode *inode, struct file *file)
111931 {
111932- return single_open(file, rt_acct_proc_show, NULL);
111933+ return single_open_restrict(file, rt_acct_proc_show, NULL);
111934 }
111935
111936 static const struct file_operations rt_acct_proc_fops = {
111937@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
111938
111939 #define IP_IDENTS_SZ 2048u
111940 struct ip_ident_bucket {
111941- atomic_t id;
111942+ atomic_unchecked_t id;
111943 u32 stamp32;
111944 };
111945
111946-static struct ip_ident_bucket *ip_idents __read_mostly;
111947+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
111948
111949 /* In order to protect privacy, we add a perturbation to identifiers
111950 * if one generator is seldom used. This makes hard for an attacker
111951@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
111952 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
111953 delta = prandom_u32_max(now - old);
111954
111955- return atomic_add_return(segs + delta, &bucket->id) - segs;
111956+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
111957 }
111958 EXPORT_SYMBOL(ip_idents_reserve);
111959
111960@@ -2643,34 +2643,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
111961 .maxlen = sizeof(int),
111962 .mode = 0200,
111963 .proc_handler = ipv4_sysctl_rtcache_flush,
111964+ .extra1 = &init_net,
111965 },
111966 { },
111967 };
111968
111969 static __net_init int sysctl_route_net_init(struct net *net)
111970 {
111971- struct ctl_table *tbl;
111972+ ctl_table_no_const *tbl = NULL;
111973
111974- tbl = ipv4_route_flush_table;
111975 if (!net_eq(net, &init_net)) {
111976- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
111977+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
111978 if (tbl == NULL)
111979 goto err_dup;
111980
111981 /* Don't export sysctls to unprivileged users */
111982 if (net->user_ns != &init_user_ns)
111983 tbl[0].procname = NULL;
111984- }
111985- tbl[0].extra1 = net;
111986+ tbl[0].extra1 = net;
111987+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
111988+ } else
111989+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
111990
111991- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
111992 if (net->ipv4.route_hdr == NULL)
111993 goto err_reg;
111994 return 0;
111995
111996 err_reg:
111997- if (tbl != ipv4_route_flush_table)
111998- kfree(tbl);
111999+ kfree(tbl);
112000 err_dup:
112001 return -ENOMEM;
112002 }
112003@@ -2693,8 +2693,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
112004
112005 static __net_init int rt_genid_init(struct net *net)
112006 {
112007- atomic_set(&net->ipv4.rt_genid, 0);
112008- atomic_set(&net->fnhe_genid, 0);
112009+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
112010+ atomic_set_unchecked(&net->fnhe_genid, 0);
112011 get_random_bytes(&net->ipv4.dev_addr_genid,
112012 sizeof(net->ipv4.dev_addr_genid));
112013 return 0;
112014@@ -2738,11 +2738,7 @@ int __init ip_rt_init(void)
112015 int rc = 0;
112016 int cpu;
112017
112018- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
112019- if (!ip_idents)
112020- panic("IP: failed to allocate ip_idents\n");
112021-
112022- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
112023+ prandom_bytes(ip_idents, sizeof(ip_idents));
112024
112025 for_each_possible_cpu(cpu) {
112026 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
112027diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
112028index d151539..5f5e247 100644
112029--- a/net/ipv4/sysctl_net_ipv4.c
112030+++ b/net/ipv4/sysctl_net_ipv4.c
112031@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
112032 container_of(table->data, struct net, ipv4.ip_local_ports.range);
112033 int ret;
112034 int range[2];
112035- struct ctl_table tmp = {
112036+ ctl_table_no_const tmp = {
112037 .data = &range,
112038 .maxlen = sizeof(range),
112039 .mode = table->mode,
112040@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
112041 int ret;
112042 gid_t urange[2];
112043 kgid_t low, high;
112044- struct ctl_table tmp = {
112045+ ctl_table_no_const tmp = {
112046 .data = &urange,
112047 .maxlen = sizeof(urange),
112048 .mode = table->mode,
112049@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
112050 void __user *buffer, size_t *lenp, loff_t *ppos)
112051 {
112052 char val[TCP_CA_NAME_MAX];
112053- struct ctl_table tbl = {
112054+ ctl_table_no_const tbl = {
112055 .data = val,
112056 .maxlen = TCP_CA_NAME_MAX,
112057 };
112058@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
112059 void __user *buffer, size_t *lenp,
112060 loff_t *ppos)
112061 {
112062- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
112063+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
112064 int ret;
112065
112066 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
112067@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
112068 void __user *buffer, size_t *lenp,
112069 loff_t *ppos)
112070 {
112071- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
112072+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
112073 int ret;
112074
112075 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
112076@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
112077 void __user *buffer, size_t *lenp,
112078 loff_t *ppos)
112079 {
112080- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
112081+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
112082 struct tcp_fastopen_context *ctxt;
112083 int ret;
112084 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
112085@@ -888,13 +888,12 @@ static struct ctl_table ipv4_net_table[] = {
112086
112087 static __net_init int ipv4_sysctl_init_net(struct net *net)
112088 {
112089- struct ctl_table *table;
112090+ ctl_table_no_const *table = NULL;
112091
112092- table = ipv4_net_table;
112093 if (!net_eq(net, &init_net)) {
112094 int i;
112095
112096- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
112097+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
112098 if (table == NULL)
112099 goto err_alloc;
112100
112101@@ -903,7 +902,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
112102 table[i].data += (void *)net - (void *)&init_net;
112103 }
112104
112105- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
112106+ if (!net_eq(net, &init_net))
112107+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
112108+ else
112109+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
112110 if (net->ipv4.ipv4_hdr == NULL)
112111 goto err_reg;
112112
112113diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
112114index d03a344..f3bbb71 100644
112115--- a/net/ipv4/tcp.c
112116+++ b/net/ipv4/tcp.c
112117@@ -520,8 +520,10 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
112118
112119 /* Race breaker. If space is freed after
112120 * wspace test but before the flags are set,
112121- * IO signal will be lost.
112122+ * IO signal will be lost. Memory barrier
112123+ * pairs with the input side.
112124 */
112125+ smp_mb__after_atomic();
112126 if (sk_stream_is_writeable(sk))
112127 mask |= POLLOUT | POLLWRNORM;
112128 }
112129diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
112130index f501ac04..0c5a1b2 100644
112131--- a/net/ipv4/tcp_input.c
112132+++ b/net/ipv4/tcp_input.c
112133@@ -767,7 +767,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
112134 * without any lock. We want to make sure compiler wont store
112135 * intermediate values in this location.
112136 */
112137- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
112138+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
112139 sk->sk_max_pacing_rate);
112140 }
112141
112142@@ -4541,7 +4541,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
112143 * simplifies code)
112144 */
112145 static void
112146-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
112147+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
112148 struct sk_buff *head, struct sk_buff *tail,
112149 u32 start, u32 end)
112150 {
112151@@ -4799,6 +4799,8 @@ static void tcp_check_space(struct sock *sk)
112152 {
112153 if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
112154 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
112155+ /* pairs with tcp_poll() */
112156+ smp_mb__after_atomic();
112157 if (sk->sk_socket &&
112158 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
112159 tcp_new_space(sk);
112160@@ -5525,6 +5527,7 @@ discard:
112161 tcp_paws_reject(&tp->rx_opt, 0))
112162 goto discard_and_undo;
112163
112164+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
112165 if (th->syn) {
112166 /* We see SYN without ACK. It is attempt of
112167 * simultaneous connect with crossed SYNs.
112168@@ -5575,6 +5578,7 @@ discard:
112169 goto discard;
112170 #endif
112171 }
112172+#endif
112173 /* "fifth, if neither of the SYN or RST bits is set then
112174 * drop the segment and return."
112175 */
112176@@ -5621,7 +5625,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
112177 goto discard;
112178
112179 if (th->syn) {
112180- if (th->fin)
112181+ if (th->fin || th->urg || th->psh)
112182 goto discard;
112183 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
112184 return 1;
112185diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
112186index f1756ee..8908cb0 100644
112187--- a/net/ipv4/tcp_ipv4.c
112188+++ b/net/ipv4/tcp_ipv4.c
112189@@ -89,6 +89,10 @@ int sysctl_tcp_tw_reuse __read_mostly;
112190 int sysctl_tcp_low_latency __read_mostly;
112191 EXPORT_SYMBOL(sysctl_tcp_low_latency);
112192
112193+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112194+extern int grsec_enable_blackhole;
112195+#endif
112196+
112197 #ifdef CONFIG_TCP_MD5SIG
112198 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
112199 __be32 daddr, __be32 saddr, const struct tcphdr *th);
112200@@ -1475,6 +1479,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
112201 return 0;
112202
112203 reset:
112204+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112205+ if (!grsec_enable_blackhole)
112206+#endif
112207 tcp_v4_send_reset(rsk, skb);
112208 discard:
112209 kfree_skb(skb);
112210@@ -1639,12 +1646,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
112211 TCP_SKB_CB(skb)->sacked = 0;
112212
112213 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
112214- if (!sk)
112215+ if (!sk) {
112216+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112217+ ret = 1;
112218+#endif
112219 goto no_tcp_socket;
112220-
112221+ }
112222 process:
112223- if (sk->sk_state == TCP_TIME_WAIT)
112224+ if (sk->sk_state == TCP_TIME_WAIT) {
112225+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112226+ ret = 2;
112227+#endif
112228 goto do_time_wait;
112229+ }
112230
112231 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
112232 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
112233@@ -1700,6 +1714,10 @@ csum_error:
112234 bad_packet:
112235 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
112236 } else {
112237+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112238+ if (!grsec_enable_blackhole || (ret == 1 &&
112239+ (skb->dev->flags & IFF_LOOPBACK)))
112240+#endif
112241 tcp_v4_send_reset(NULL, skb);
112242 }
112243
112244diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
112245index 50277af..defe393 100644
112246--- a/net/ipv4/tcp_minisocks.c
112247+++ b/net/ipv4/tcp_minisocks.c
112248@@ -27,6 +27,10 @@
112249 #include <net/inet_common.h>
112250 #include <net/xfrm.h>
112251
112252+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112253+extern int grsec_enable_blackhole;
112254+#endif
112255+
112256 int sysctl_tcp_syncookies __read_mostly = 1;
112257 EXPORT_SYMBOL(sysctl_tcp_syncookies);
112258
112259@@ -788,7 +792,10 @@ embryonic_reset:
112260 * avoid becoming vulnerable to outside attack aiming at
112261 * resetting legit local connections.
112262 */
112263- req->rsk_ops->send_reset(sk, skb);
112264+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112265+ if (!grsec_enable_blackhole)
112266+#endif
112267+ req->rsk_ops->send_reset(sk, skb);
112268 } else if (fastopen) { /* received a valid RST pkt */
112269 reqsk_fastopen_remove(sk, req, true);
112270 tcp_reset(sk);
112271diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
112272index ebf5ff5..4d1ff32 100644
112273--- a/net/ipv4/tcp_probe.c
112274+++ b/net/ipv4/tcp_probe.c
112275@@ -236,7 +236,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
112276 if (cnt + width >= len)
112277 break;
112278
112279- if (copy_to_user(buf + cnt, tbuf, width))
112280+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
112281 return -EFAULT;
112282 cnt += width;
112283 }
112284diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
112285index 0732b78..a82bdc6 100644
112286--- a/net/ipv4/tcp_timer.c
112287+++ b/net/ipv4/tcp_timer.c
112288@@ -22,6 +22,10 @@
112289 #include <linux/gfp.h>
112290 #include <net/tcp.h>
112291
112292+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112293+extern int grsec_lastack_retries;
112294+#endif
112295+
112296 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
112297 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
112298 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
112299@@ -194,6 +198,13 @@ static int tcp_write_timeout(struct sock *sk)
112300 }
112301 }
112302
112303+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112304+ if ((sk->sk_state == TCP_LAST_ACK) &&
112305+ (grsec_lastack_retries > 0) &&
112306+ (grsec_lastack_retries < retry_until))
112307+ retry_until = grsec_lastack_retries;
112308+#endif
112309+
112310 if (retransmits_timed_out(sk, retry_until,
112311 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
112312 /* Has it gone just too far? */
112313diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
112314index 51f1745..4bc0427 100644
112315--- a/net/ipv4/udp.c
112316+++ b/net/ipv4/udp.c
112317@@ -87,6 +87,7 @@
112318 #include <linux/types.h>
112319 #include <linux/fcntl.h>
112320 #include <linux/module.h>
112321+#include <linux/security.h>
112322 #include <linux/socket.h>
112323 #include <linux/sockios.h>
112324 #include <linux/igmp.h>
112325@@ -115,6 +116,10 @@
112326 #include <net/busy_poll.h>
112327 #include "udp_impl.h"
112328
112329+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112330+extern int grsec_enable_blackhole;
112331+#endif
112332+
112333 struct udp_table udp_table __read_mostly;
112334 EXPORT_SYMBOL(udp_table);
112335
112336@@ -609,6 +614,9 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
112337 return true;
112338 }
112339
112340+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
112341+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
112342+
112343 /*
112344 * This routine is called by the ICMP module when it gets some
112345 * sort of error condition. If err < 0 then the socket should
112346@@ -946,9 +954,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
112347 dport = usin->sin_port;
112348 if (dport == 0)
112349 return -EINVAL;
112350+
112351+ err = gr_search_udp_sendmsg(sk, usin);
112352+ if (err)
112353+ return err;
112354 } else {
112355 if (sk->sk_state != TCP_ESTABLISHED)
112356 return -EDESTADDRREQ;
112357+
112358+ err = gr_search_udp_sendmsg(sk, NULL);
112359+ if (err)
112360+ return err;
112361+
112362 daddr = inet->inet_daddr;
112363 dport = inet->inet_dport;
112364 /* Open fast path for connected socket.
112365@@ -1196,7 +1213,7 @@ static unsigned int first_packet_length(struct sock *sk)
112366 IS_UDPLITE(sk));
112367 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
112368 IS_UDPLITE(sk));
112369- atomic_inc(&sk->sk_drops);
112370+ atomic_inc_unchecked(&sk->sk_drops);
112371 __skb_unlink(skb, rcvq);
112372 __skb_queue_tail(&list_kill, skb);
112373 }
112374@@ -1276,6 +1293,10 @@ try_again:
112375 if (!skb)
112376 goto out;
112377
112378+ err = gr_search_udp_recvmsg(sk, skb);
112379+ if (err)
112380+ goto out_free;
112381+
112382 ulen = skb->len - sizeof(struct udphdr);
112383 copied = len;
112384 if (copied > ulen)
112385@@ -1308,7 +1329,7 @@ try_again:
112386 if (unlikely(err)) {
112387 trace_kfree_skb(skb, udp_recvmsg);
112388 if (!peeked) {
112389- atomic_inc(&sk->sk_drops);
112390+ atomic_inc_unchecked(&sk->sk_drops);
112391 UDP_INC_STATS_USER(sock_net(sk),
112392 UDP_MIB_INERRORS, is_udplite);
112393 }
112394@@ -1604,7 +1625,7 @@ csum_error:
112395 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
112396 drop:
112397 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
112398- atomic_inc(&sk->sk_drops);
112399+ atomic_inc_unchecked(&sk->sk_drops);
112400 kfree_skb(skb);
112401 return -1;
112402 }
112403@@ -1623,7 +1644,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
112404 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
112405
112406 if (!skb1) {
112407- atomic_inc(&sk->sk_drops);
112408+ atomic_inc_unchecked(&sk->sk_drops);
112409 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
112410 IS_UDPLITE(sk));
112411 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
112412@@ -1829,6 +1850,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
112413 goto csum_error;
112414
112415 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
112416+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
112417+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
112418+#endif
112419 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
112420
112421 /*
112422@@ -2426,7 +2450,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
112423 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
112424 0, sock_i_ino(sp),
112425 atomic_read(&sp->sk_refcnt), sp,
112426- atomic_read(&sp->sk_drops));
112427+ atomic_read_unchecked(&sp->sk_drops));
112428 }
112429
112430 int udp4_seq_show(struct seq_file *seq, void *v)
112431diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
112432index 6156f68..d6ab46d 100644
112433--- a/net/ipv4/xfrm4_policy.c
112434+++ b/net/ipv4/xfrm4_policy.c
112435@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
112436 fl4->flowi4_tos = iph->tos;
112437 }
112438
112439-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
112440+static int xfrm4_garbage_collect(struct dst_ops *ops)
112441 {
112442 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
112443
112444- xfrm4_policy_afinfo.garbage_collect(net);
112445+ xfrm_garbage_collect_deferred(net);
112446 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
112447 }
112448
112449@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
112450
112451 static int __net_init xfrm4_net_init(struct net *net)
112452 {
112453- struct ctl_table *table;
112454+ ctl_table_no_const *table = NULL;
112455 struct ctl_table_header *hdr;
112456
112457- table = xfrm4_policy_table;
112458 if (!net_eq(net, &init_net)) {
112459- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
112460+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
112461 if (!table)
112462 goto err_alloc;
112463
112464 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
112465- }
112466-
112467- hdr = register_net_sysctl(net, "net/ipv4", table);
112468+ hdr = register_net_sysctl(net, "net/ipv4", table);
112469+ } else
112470+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
112471 if (!hdr)
112472 goto err_reg;
112473
112474@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
112475 return 0;
112476
112477 err_reg:
112478- if (!net_eq(net, &init_net))
112479- kfree(table);
112480+ kfree(table);
112481 err_alloc:
112482 return -ENOMEM;
112483 }
112484diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
112485index b603002..0de5c88 100644
112486--- a/net/ipv6/addrconf.c
112487+++ b/net/ipv6/addrconf.c
112488@@ -171,7 +171,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
112489 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
112490 .mtu6 = IPV6_MIN_MTU,
112491 .accept_ra = 1,
112492- .accept_redirects = 1,
112493+ .accept_redirects = 0,
112494 .autoconf = 1,
112495 .force_mld_version = 0,
112496 .mldv1_unsolicited_report_interval = 10 * HZ,
112497@@ -209,7 +209,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
112498 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
112499 .mtu6 = IPV6_MIN_MTU,
112500 .accept_ra = 1,
112501- .accept_redirects = 1,
112502+ .accept_redirects = 0,
112503 .autoconf = 1,
112504 .force_mld_version = 0,
112505 .mldv1_unsolicited_report_interval = 10 * HZ,
112506@@ -607,7 +607,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
112507 idx = 0;
112508 head = &net->dev_index_head[h];
112509 rcu_read_lock();
112510- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
112511+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
112512 net->dev_base_seq;
112513 hlist_for_each_entry_rcu(dev, head, index_hlist) {
112514 if (idx < s_idx)
112515@@ -2438,7 +2438,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
112516 p.iph.ihl = 5;
112517 p.iph.protocol = IPPROTO_IPV6;
112518 p.iph.ttl = 64;
112519- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
112520+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
112521
112522 if (ops->ndo_do_ioctl) {
112523 mm_segment_t oldfs = get_fs();
112524@@ -3587,16 +3587,23 @@ static const struct file_operations if6_fops = {
112525 .release = seq_release_net,
112526 };
112527
112528+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
112529+extern void unregister_ipv6_seq_ops_addr(void);
112530+
112531 static int __net_init if6_proc_net_init(struct net *net)
112532 {
112533- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
112534+ register_ipv6_seq_ops_addr(&if6_seq_ops);
112535+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
112536+ unregister_ipv6_seq_ops_addr();
112537 return -ENOMEM;
112538+ }
112539 return 0;
112540 }
112541
112542 static void __net_exit if6_proc_net_exit(struct net *net)
112543 {
112544 remove_proc_entry("if_inet6", net->proc_net);
112545+ unregister_ipv6_seq_ops_addr();
112546 }
112547
112548 static struct pernet_operations if6_proc_net_ops = {
112549@@ -4215,7 +4222,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
112550 s_ip_idx = ip_idx = cb->args[2];
112551
112552 rcu_read_lock();
112553- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
112554+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
112555 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
112556 idx = 0;
112557 head = &net->dev_index_head[h];
112558@@ -4864,7 +4871,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
112559 rt_genid_bump_ipv6(net);
112560 break;
112561 }
112562- atomic_inc(&net->ipv6.dev_addr_genid);
112563+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
112564 }
112565
112566 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
112567@@ -4884,7 +4891,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
112568 int *valp = ctl->data;
112569 int val = *valp;
112570 loff_t pos = *ppos;
112571- struct ctl_table lctl;
112572+ ctl_table_no_const lctl;
112573 int ret;
112574
112575 /*
112576@@ -4909,7 +4916,7 @@ int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
112577 {
112578 struct inet6_dev *idev = ctl->extra1;
112579 int min_mtu = IPV6_MIN_MTU;
112580- struct ctl_table lctl;
112581+ ctl_table_no_const lctl;
112582
112583 lctl = *ctl;
112584 lctl.extra1 = &min_mtu;
112585@@ -4984,7 +4991,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
112586 int *valp = ctl->data;
112587 int val = *valp;
112588 loff_t pos = *ppos;
112589- struct ctl_table lctl;
112590+ ctl_table_no_const lctl;
112591 int ret;
112592
112593 /*
112594diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
112595index 98cc4cd..0768c4e 100644
112596--- a/net/ipv6/addrconf_core.c
112597+++ b/net/ipv6/addrconf_core.c
112598@@ -133,6 +133,14 @@ static void snmp6_free_dev(struct inet6_dev *idev)
112599 free_percpu(idev->stats.ipv6);
112600 }
112601
112602+static void in6_dev_finish_destroy_rcu(struct rcu_head *head)
112603+{
112604+ struct inet6_dev *idev = container_of(head, struct inet6_dev, rcu);
112605+
112606+ snmp6_free_dev(idev);
112607+ kfree(idev);
112608+}
112609+
112610 /* Nobody refers to this device, we may destroy it. */
112611
112612 void in6_dev_finish_destroy(struct inet6_dev *idev)
112613@@ -151,7 +159,6 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
112614 pr_warn("Freeing alive inet6 device %p\n", idev);
112615 return;
112616 }
112617- snmp6_free_dev(idev);
112618- kfree_rcu(idev, rcu);
112619+ call_rcu(&idev->rcu, in6_dev_finish_destroy_rcu);
112620 }
112621 EXPORT_SYMBOL(in6_dev_finish_destroy);
112622diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
112623index e8c4400..a4cd5da 100644
112624--- a/net/ipv6/af_inet6.c
112625+++ b/net/ipv6/af_inet6.c
112626@@ -766,7 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
112627 net->ipv6.sysctl.icmpv6_time = 1*HZ;
112628 net->ipv6.sysctl.flowlabel_consistency = 1;
112629 net->ipv6.sysctl.auto_flowlabels = 0;
112630- atomic_set(&net->ipv6.fib6_sernum, 1);
112631+ atomic_set_unchecked(&net->ipv6.fib6_sernum, 1);
112632
112633 err = ipv6_init_mibs(net);
112634 if (err)
112635diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
112636index d174b91..34801a1 100644
112637--- a/net/ipv6/datagram.c
112638+++ b/net/ipv6/datagram.c
112639@@ -967,5 +967,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
112640 0,
112641 sock_i_ino(sp),
112642 atomic_read(&sp->sk_refcnt), sp,
112643- atomic_read(&sp->sk_drops));
112644+ atomic_read_unchecked(&sp->sk_drops));
112645 }
112646diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
112647index a5e9519..16b7412 100644
112648--- a/net/ipv6/icmp.c
112649+++ b/net/ipv6/icmp.c
112650@@ -1005,7 +1005,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
112651
112652 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
112653 {
112654- struct ctl_table *table;
112655+ ctl_table_no_const *table;
112656
112657 table = kmemdup(ipv6_icmp_table_template,
112658 sizeof(ipv6_icmp_table_template),
112659diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
112660index 263ef41..88c7be8 100644
112661--- a/net/ipv6/ip6_fib.c
112662+++ b/net/ipv6/ip6_fib.c
112663@@ -99,9 +99,9 @@ static int fib6_new_sernum(struct net *net)
112664 int new, old;
112665
112666 do {
112667- old = atomic_read(&net->ipv6.fib6_sernum);
112668+ old = atomic_read_unchecked(&net->ipv6.fib6_sernum);
112669 new = old < INT_MAX ? old + 1 : 1;
112670- } while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
112671+ } while (atomic_cmpxchg_unchecked(&net->ipv6.fib6_sernum,
112672 old, new) != old);
112673 return new;
112674 }
112675diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
112676index bc28b7d..a08feea 100644
112677--- a/net/ipv6/ip6_gre.c
112678+++ b/net/ipv6/ip6_gre.c
112679@@ -71,8 +71,8 @@ struct ip6gre_net {
112680 struct net_device *fb_tunnel_dev;
112681 };
112682
112683-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
112684-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
112685+static struct rtnl_link_ops ip6gre_link_ops;
112686+static struct rtnl_link_ops ip6gre_tap_ops;
112687 static int ip6gre_tunnel_init(struct net_device *dev);
112688 static void ip6gre_tunnel_setup(struct net_device *dev);
112689 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
112690@@ -1289,7 +1289,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
112691 }
112692
112693
112694-static struct inet6_protocol ip6gre_protocol __read_mostly = {
112695+static struct inet6_protocol ip6gre_protocol = {
112696 .handler = ip6gre_rcv,
112697 .err_handler = ip6gre_err,
112698 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
112699@@ -1650,7 +1650,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
112700 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
112701 };
112702
112703-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
112704+static struct rtnl_link_ops ip6gre_link_ops = {
112705 .kind = "ip6gre",
112706 .maxtype = IFLA_GRE_MAX,
112707 .policy = ip6gre_policy,
112708@@ -1665,7 +1665,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
112709 .get_link_net = ip6_tnl_get_link_net,
112710 };
112711
112712-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
112713+static struct rtnl_link_ops ip6gre_tap_ops = {
112714 .kind = "ip6gretap",
112715 .maxtype = IFLA_GRE_MAX,
112716 .policy = ip6gre_policy,
112717diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
112718index ddd94ec..b7cfefb 100644
112719--- a/net/ipv6/ip6_tunnel.c
112720+++ b/net/ipv6/ip6_tunnel.c
112721@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
112722
112723 static int ip6_tnl_dev_init(struct net_device *dev);
112724 static void ip6_tnl_dev_setup(struct net_device *dev);
112725-static struct rtnl_link_ops ip6_link_ops __read_mostly;
112726+static struct rtnl_link_ops ip6_link_ops;
112727
112728 static int ip6_tnl_net_id __read_mostly;
112729 struct ip6_tnl_net {
112730@@ -1780,7 +1780,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
112731 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
112732 };
112733
112734-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
112735+static struct rtnl_link_ops ip6_link_ops = {
112736 .kind = "ip6tnl",
112737 .maxtype = IFLA_IPTUN_MAX,
112738 .policy = ip6_tnl_policy,
112739diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
112740index 5fb9e21..92bf04b 100644
112741--- a/net/ipv6/ip6_vti.c
112742+++ b/net/ipv6/ip6_vti.c
112743@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
112744
112745 static int vti6_dev_init(struct net_device *dev);
112746 static void vti6_dev_setup(struct net_device *dev);
112747-static struct rtnl_link_ops vti6_link_ops __read_mostly;
112748+static struct rtnl_link_ops vti6_link_ops;
112749
112750 static int vti6_net_id __read_mostly;
112751 struct vti6_net {
112752@@ -1004,7 +1004,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
112753 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
112754 };
112755
112756-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
112757+static struct rtnl_link_ops vti6_link_ops = {
112758 .kind = "vti6",
112759 .maxtype = IFLA_VTI_MAX,
112760 .policy = vti6_policy,
112761diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
112762index 8d766d9..dcdfea7 100644
112763--- a/net/ipv6/ipv6_sockglue.c
112764+++ b/net/ipv6/ipv6_sockglue.c
112765@@ -989,7 +989,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
112766 if (sk->sk_type != SOCK_STREAM)
112767 return -ENOPROTOOPT;
112768
112769- msg.msg_control = optval;
112770+ msg.msg_control = (void __force_kernel *)optval;
112771 msg.msg_controllen = len;
112772 msg.msg_flags = flags;
112773
112774diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
112775index bb00c6f..16c90d7 100644
112776--- a/net/ipv6/netfilter/ip6_tables.c
112777+++ b/net/ipv6/netfilter/ip6_tables.c
112778@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
112779 #endif
112780
112781 static int get_info(struct net *net, void __user *user,
112782- const int *len, int compat)
112783+ int len, int compat)
112784 {
112785 char name[XT_TABLE_MAXNAMELEN];
112786 struct xt_table *t;
112787 int ret;
112788
112789- if (*len != sizeof(struct ip6t_getinfo)) {
112790- duprintf("length %u != %zu\n", *len,
112791+ if (len != sizeof(struct ip6t_getinfo)) {
112792+ duprintf("length %u != %zu\n", len,
112793 sizeof(struct ip6t_getinfo));
112794 return -EINVAL;
112795 }
112796@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
112797 info.size = private->size;
112798 strcpy(info.name, name);
112799
112800- if (copy_to_user(user, &info, *len) != 0)
112801+ if (copy_to_user(user, &info, len) != 0)
112802 ret = -EFAULT;
112803 else
112804 ret = 0;
112805@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
112806
112807 switch (cmd) {
112808 case IP6T_SO_GET_INFO:
112809- ret = get_info(sock_net(sk), user, len, 1);
112810+ ret = get_info(sock_net(sk), user, *len, 1);
112811 break;
112812 case IP6T_SO_GET_ENTRIES:
112813 ret = compat_get_entries(sock_net(sk), user, len);
112814@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
112815
112816 switch (cmd) {
112817 case IP6T_SO_GET_INFO:
112818- ret = get_info(sock_net(sk), user, len, 0);
112819+ ret = get_info(sock_net(sk), user, *len, 0);
112820 break;
112821
112822 case IP6T_SO_GET_ENTRIES:
112823diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
112824index 6f187c8..34b367f 100644
112825--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
112826+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
112827@@ -96,12 +96,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
112828
112829 static int nf_ct_frag6_sysctl_register(struct net *net)
112830 {
112831- struct ctl_table *table;
112832+ ctl_table_no_const *table = NULL;
112833 struct ctl_table_header *hdr;
112834
112835- table = nf_ct_frag6_sysctl_table;
112836 if (!net_eq(net, &init_net)) {
112837- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
112838+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
112839 GFP_KERNEL);
112840 if (table == NULL)
112841 goto err_alloc;
112842@@ -112,9 +111,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
112843 table[2].data = &net->nf_frag.frags.high_thresh;
112844 table[2].extra1 = &net->nf_frag.frags.low_thresh;
112845 table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
112846- }
112847-
112848- hdr = register_net_sysctl(net, "net/netfilter", table);
112849+ hdr = register_net_sysctl(net, "net/netfilter", table);
112850+ } else
112851+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
112852 if (hdr == NULL)
112853 goto err_reg;
112854
112855@@ -122,8 +121,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
112856 return 0;
112857
112858 err_reg:
112859- if (!net_eq(net, &init_net))
112860- kfree(table);
112861+ kfree(table);
112862 err_alloc:
112863 return -ENOMEM;
112864 }
112865diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
112866index a2dfff6..1e52e6d 100644
112867--- a/net/ipv6/ping.c
112868+++ b/net/ipv6/ping.c
112869@@ -241,6 +241,24 @@ static struct pernet_operations ping_v6_net_ops = {
112870 };
112871 #endif
112872
112873+static struct pingv6_ops real_pingv6_ops = {
112874+ .ipv6_recv_error = ipv6_recv_error,
112875+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
112876+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
112877+ .icmpv6_err_convert = icmpv6_err_convert,
112878+ .ipv6_icmp_error = ipv6_icmp_error,
112879+ .ipv6_chk_addr = ipv6_chk_addr,
112880+};
112881+
112882+static struct pingv6_ops dummy_pingv6_ops = {
112883+ .ipv6_recv_error = dummy_ipv6_recv_error,
112884+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
112885+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
112886+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
112887+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
112888+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
112889+};
112890+
112891 int __init pingv6_init(void)
112892 {
112893 #ifdef CONFIG_PROC_FS
112894@@ -248,13 +266,7 @@ int __init pingv6_init(void)
112895 if (ret)
112896 return ret;
112897 #endif
112898- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
112899- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
112900- pingv6_ops.ip6_datagram_recv_specific_ctl =
112901- ip6_datagram_recv_specific_ctl;
112902- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
112903- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
112904- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
112905+ pingv6_ops = &real_pingv6_ops;
112906 return inet6_register_protosw(&pingv6_protosw);
112907 }
112908
112909@@ -263,14 +275,9 @@ int __init pingv6_init(void)
112910 */
112911 void pingv6_exit(void)
112912 {
112913- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
112914- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
112915- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
112916- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
112917- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
112918- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
112919 #ifdef CONFIG_PROC_FS
112920 unregister_pernet_subsys(&ping_v6_net_ops);
112921 #endif
112922+ pingv6_ops = &dummy_pingv6_ops;
112923 inet6_unregister_protosw(&pingv6_protosw);
112924 }
112925diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
112926index 679253d0..70b653c 100644
112927--- a/net/ipv6/proc.c
112928+++ b/net/ipv6/proc.c
112929@@ -310,7 +310,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
112930 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
112931 goto proc_snmp6_fail;
112932
112933- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
112934+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
112935 if (!net->mib.proc_net_devsnmp6)
112936 goto proc_dev_snmp6_fail;
112937 return 0;
112938diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
112939index dae7f1a..783b20d 100644
112940--- a/net/ipv6/raw.c
112941+++ b/net/ipv6/raw.c
112942@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
112943 {
112944 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
112945 skb_checksum_complete(skb)) {
112946- atomic_inc(&sk->sk_drops);
112947+ atomic_inc_unchecked(&sk->sk_drops);
112948 kfree_skb(skb);
112949 return NET_RX_DROP;
112950 }
112951@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
112952 struct raw6_sock *rp = raw6_sk(sk);
112953
112954 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
112955- atomic_inc(&sk->sk_drops);
112956+ atomic_inc_unchecked(&sk->sk_drops);
112957 kfree_skb(skb);
112958 return NET_RX_DROP;
112959 }
112960@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
112961
112962 if (inet->hdrincl) {
112963 if (skb_checksum_complete(skb)) {
112964- atomic_inc(&sk->sk_drops);
112965+ atomic_inc_unchecked(&sk->sk_drops);
112966 kfree_skb(skb);
112967 return NET_RX_DROP;
112968 }
112969@@ -609,7 +609,7 @@ out:
112970 return err;
112971 }
112972
112973-static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
112974+static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, unsigned int length,
112975 struct flowi6 *fl6, struct dst_entry **dstp,
112976 unsigned int flags)
112977 {
112978@@ -915,12 +915,15 @@ do_confirm:
112979 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
112980 char __user *optval, int optlen)
112981 {
112982+ struct icmp6_filter filter;
112983+
112984 switch (optname) {
112985 case ICMPV6_FILTER:
112986 if (optlen > sizeof(struct icmp6_filter))
112987 optlen = sizeof(struct icmp6_filter);
112988- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
112989+ if (copy_from_user(&filter, optval, optlen))
112990 return -EFAULT;
112991+ raw6_sk(sk)->filter = filter;
112992 return 0;
112993 default:
112994 return -ENOPROTOOPT;
112995@@ -933,6 +936,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
112996 char __user *optval, int __user *optlen)
112997 {
112998 int len;
112999+ struct icmp6_filter filter;
113000
113001 switch (optname) {
113002 case ICMPV6_FILTER:
113003@@ -944,7 +948,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
113004 len = sizeof(struct icmp6_filter);
113005 if (put_user(len, optlen))
113006 return -EFAULT;
113007- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
113008+ filter = raw6_sk(sk)->filter;
113009+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
113010 return -EFAULT;
113011 return 0;
113012 default:
113013diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
113014index d7d70e6..bd5e9fc 100644
113015--- a/net/ipv6/reassembly.c
113016+++ b/net/ipv6/reassembly.c
113017@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
113018
113019 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
113020 {
113021- struct ctl_table *table;
113022+ ctl_table_no_const *table = NULL;
113023 struct ctl_table_header *hdr;
113024
113025- table = ip6_frags_ns_ctl_table;
113026 if (!net_eq(net, &init_net)) {
113027- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
113028+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
113029 if (table == NULL)
113030 goto err_alloc;
113031
113032@@ -645,9 +644,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
113033 /* Don't export sysctls to unprivileged users */
113034 if (net->user_ns != &init_user_ns)
113035 table[0].procname = NULL;
113036- }
113037+ hdr = register_net_sysctl(net, "net/ipv6", table);
113038+ } else
113039+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
113040
113041- hdr = register_net_sysctl(net, "net/ipv6", table);
113042 if (hdr == NULL)
113043 goto err_reg;
113044
113045@@ -655,8 +655,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
113046 return 0;
113047
113048 err_reg:
113049- if (!net_eq(net, &init_net))
113050- kfree(table);
113051+ kfree(table);
113052 err_alloc:
113053 return -ENOMEM;
113054 }
113055diff --git a/net/ipv6/route.c b/net/ipv6/route.c
113056index 4688bd4..584453d 100644
113057--- a/net/ipv6/route.c
113058+++ b/net/ipv6/route.c
113059@@ -3029,7 +3029,7 @@ struct ctl_table ipv6_route_table_template[] = {
113060
113061 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
113062 {
113063- struct ctl_table *table;
113064+ ctl_table_no_const *table;
113065
113066 table = kmemdup(ipv6_route_table_template,
113067 sizeof(ipv6_route_table_template),
113068diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
113069index e4cbd57..02b1aaa 100644
113070--- a/net/ipv6/sit.c
113071+++ b/net/ipv6/sit.c
113072@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
113073 static void ipip6_dev_free(struct net_device *dev);
113074 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
113075 __be32 *v4dst);
113076-static struct rtnl_link_ops sit_link_ops __read_mostly;
113077+static struct rtnl_link_ops sit_link_ops;
113078
113079 static int sit_net_id __read_mostly;
113080 struct sit_net {
113081@@ -1751,7 +1751,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
113082 unregister_netdevice_queue(dev, head);
113083 }
113084
113085-static struct rtnl_link_ops sit_link_ops __read_mostly = {
113086+static struct rtnl_link_ops sit_link_ops = {
113087 .kind = "sit",
113088 .maxtype = IFLA_IPTUN_MAX,
113089 .policy = ipip6_policy,
113090diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
113091index c5c10fa..2577d51 100644
113092--- a/net/ipv6/sysctl_net_ipv6.c
113093+++ b/net/ipv6/sysctl_net_ipv6.c
113094@@ -78,7 +78,7 @@ static struct ctl_table ipv6_rotable[] = {
113095
113096 static int __net_init ipv6_sysctl_net_init(struct net *net)
113097 {
113098- struct ctl_table *ipv6_table;
113099+ ctl_table_no_const *ipv6_table;
113100 struct ctl_table *ipv6_route_table;
113101 struct ctl_table *ipv6_icmp_table;
113102 int err;
113103diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
113104index 5ca3bc8..8c53c81 100644
113105--- a/net/ipv6/tcp_ipv6.c
113106+++ b/net/ipv6/tcp_ipv6.c
113107@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
113108 }
113109 }
113110
113111+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
113112+extern int grsec_enable_blackhole;
113113+#endif
113114+
113115 static void tcp_v6_hash(struct sock *sk)
113116 {
113117 if (sk->sk_state != TCP_CLOSE) {
113118@@ -1345,6 +1349,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
113119 return 0;
113120
113121 reset:
113122+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
113123+ if (!grsec_enable_blackhole)
113124+#endif
113125 tcp_v6_send_reset(sk, skb);
113126 discard:
113127 if (opt_skb)
113128@@ -1454,12 +1461,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
113129
113130 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
113131 inet6_iif(skb));
113132- if (!sk)
113133+ if (!sk) {
113134+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
113135+ ret = 1;
113136+#endif
113137 goto no_tcp_socket;
113138+ }
113139
113140 process:
113141- if (sk->sk_state == TCP_TIME_WAIT)
113142+ if (sk->sk_state == TCP_TIME_WAIT) {
113143+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
113144+ ret = 2;
113145+#endif
113146 goto do_time_wait;
113147+ }
113148
113149 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
113150 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
113151@@ -1510,6 +1525,10 @@ csum_error:
113152 bad_packet:
113153 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
113154 } else {
113155+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
113156+ if (!grsec_enable_blackhole || (ret == 1 &&
113157+ (skb->dev->flags & IFF_LOOPBACK)))
113158+#endif
113159 tcp_v6_send_reset(NULL, skb);
113160 }
113161
113162diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
113163index 1c9512a..786b8d6 100644
113164--- a/net/ipv6/udp.c
113165+++ b/net/ipv6/udp.c
113166@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
113167 udp_ipv6_hash_secret + net_hash_mix(net));
113168 }
113169
113170+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
113171+extern int grsec_enable_blackhole;
113172+#endif
113173+
113174 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
113175 {
113176 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
113177@@ -448,7 +452,7 @@ try_again:
113178 if (unlikely(err)) {
113179 trace_kfree_skb(skb, udpv6_recvmsg);
113180 if (!peeked) {
113181- atomic_inc(&sk->sk_drops);
113182+ atomic_inc_unchecked(&sk->sk_drops);
113183 if (is_udp4)
113184 UDP_INC_STATS_USER(sock_net(sk),
113185 UDP_MIB_INERRORS,
113186@@ -712,7 +716,7 @@ csum_error:
113187 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
113188 drop:
113189 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
113190- atomic_inc(&sk->sk_drops);
113191+ atomic_inc_unchecked(&sk->sk_drops);
113192 kfree_skb(skb);
113193 return -1;
113194 }
113195@@ -753,7 +757,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
113196 if (likely(skb1 == NULL))
113197 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
113198 if (!skb1) {
113199- atomic_inc(&sk->sk_drops);
113200+ atomic_inc_unchecked(&sk->sk_drops);
113201 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
113202 IS_UDPLITE(sk));
113203 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
113204@@ -937,6 +941,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
113205 goto csum_error;
113206
113207 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
113208+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
113209+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
113210+#endif
113211 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
113212
113213 kfree_skb(skb);
113214diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
113215index 8d2d01b4..313511e 100644
113216--- a/net/ipv6/xfrm6_policy.c
113217+++ b/net/ipv6/xfrm6_policy.c
113218@@ -224,11 +224,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
113219 }
113220 }
113221
113222-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
113223+static int xfrm6_garbage_collect(struct dst_ops *ops)
113224 {
113225 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
113226
113227- xfrm6_policy_afinfo.garbage_collect(net);
113228+ xfrm_garbage_collect_deferred(net);
113229 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
113230 }
113231
113232@@ -341,19 +341,19 @@ static struct ctl_table xfrm6_policy_table[] = {
113233
113234 static int __net_init xfrm6_net_init(struct net *net)
113235 {
113236- struct ctl_table *table;
113237+ ctl_table_no_const *table = NULL;
113238 struct ctl_table_header *hdr;
113239
113240- table = xfrm6_policy_table;
113241 if (!net_eq(net, &init_net)) {
113242- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
113243+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
113244 if (!table)
113245 goto err_alloc;
113246
113247 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
113248- }
113249+ hdr = register_net_sysctl(net, "net/ipv6", table);
113250+ } else
113251+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
113252
113253- hdr = register_net_sysctl(net, "net/ipv6", table);
113254 if (!hdr)
113255 goto err_reg;
113256
113257@@ -361,8 +361,7 @@ static int __net_init xfrm6_net_init(struct net *net)
113258 return 0;
113259
113260 err_reg:
113261- if (!net_eq(net, &init_net))
113262- kfree(table);
113263+ kfree(table);
113264 err_alloc:
113265 return -ENOMEM;
113266 }
113267diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
113268index c1d247e..9e5949d 100644
113269--- a/net/ipx/ipx_proc.c
113270+++ b/net/ipx/ipx_proc.c
113271@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
113272 struct proc_dir_entry *p;
113273 int rc = -ENOMEM;
113274
113275- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
113276+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
113277
113278 if (!ipx_proc_dir)
113279 goto out;
113280diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
113281index 683346d..cb0e12d 100644
113282--- a/net/irda/ircomm/ircomm_tty.c
113283+++ b/net/irda/ircomm/ircomm_tty.c
113284@@ -310,10 +310,10 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
113285 add_wait_queue(&port->open_wait, &wait);
113286
113287 pr_debug("%s(%d):block_til_ready before block on %s open_count=%d\n",
113288- __FILE__, __LINE__, tty->driver->name, port->count);
113289+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
113290
113291 spin_lock_irqsave(&port->lock, flags);
113292- port->count--;
113293+ atomic_dec(&port->count);
113294 port->blocked_open++;
113295 spin_unlock_irqrestore(&port->lock, flags);
113296
113297@@ -348,7 +348,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
113298 }
113299
113300 pr_debug("%s(%d):block_til_ready blocking on %s open_count=%d\n",
113301- __FILE__, __LINE__, tty->driver->name, port->count);
113302+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
113303
113304 schedule();
113305 }
113306@@ -358,12 +358,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
113307
113308 spin_lock_irqsave(&port->lock, flags);
113309 if (!tty_hung_up_p(filp))
113310- port->count++;
113311+ atomic_inc(&port->count);
113312 port->blocked_open--;
113313 spin_unlock_irqrestore(&port->lock, flags);
113314
113315 pr_debug("%s(%d):block_til_ready after blocking on %s open_count=%d\n",
113316- __FILE__, __LINE__, tty->driver->name, port->count);
113317+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
113318
113319 if (!retval)
113320 port->flags |= ASYNC_NORMAL_ACTIVE;
113321@@ -433,12 +433,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
113322
113323 /* ++ is not atomic, so this should be protected - Jean II */
113324 spin_lock_irqsave(&self->port.lock, flags);
113325- self->port.count++;
113326+ atomic_inc(&self->port.count);
113327 spin_unlock_irqrestore(&self->port.lock, flags);
113328 tty_port_tty_set(&self->port, tty);
113329
113330 pr_debug("%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
113331- self->line, self->port.count);
113332+ self->line, atomic_read(&self->port.count));
113333
113334 /* Not really used by us, but lets do it anyway */
113335 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
113336@@ -961,7 +961,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
113337 tty_kref_put(port->tty);
113338 }
113339 port->tty = NULL;
113340- port->count = 0;
113341+ atomic_set(&port->count, 0);
113342 spin_unlock_irqrestore(&port->lock, flags);
113343
113344 wake_up_interruptible(&port->open_wait);
113345@@ -1308,7 +1308,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
113346 seq_putc(m, '\n');
113347
113348 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
113349- seq_printf(m, "Open count: %d\n", self->port.count);
113350+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
113351 seq_printf(m, "Max data size: %d\n", self->max_data_size);
113352 seq_printf(m, "Max header size: %d\n", self->max_header_size);
113353
113354diff --git a/net/irda/irproc.c b/net/irda/irproc.c
113355index b9ac598..f88cc56 100644
113356--- a/net/irda/irproc.c
113357+++ b/net/irda/irproc.c
113358@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
113359 {
113360 int i;
113361
113362- proc_irda = proc_mkdir("irda", init_net.proc_net);
113363+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
113364 if (proc_irda == NULL)
113365 return;
113366
113367diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
113368index 53d9311..cbaf99f 100644
113369--- a/net/iucv/af_iucv.c
113370+++ b/net/iucv/af_iucv.c
113371@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
113372 {
113373 char name[12];
113374
113375- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
113376+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
113377 while (__iucv_get_sock_by_name(name)) {
113378 sprintf(name, "%08x",
113379- atomic_inc_return(&iucv_sk_list.autobind_name));
113380+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
113381 }
113382 memcpy(iucv->src_name, name, 8);
113383 }
113384diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
113385index 2a6a1fd..6c112b0 100644
113386--- a/net/iucv/iucv.c
113387+++ b/net/iucv/iucv.c
113388@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
113389 return NOTIFY_OK;
113390 }
113391
113392-static struct notifier_block __refdata iucv_cpu_notifier = {
113393+static struct notifier_block iucv_cpu_notifier = {
113394 .notifier_call = iucv_cpu_notify,
113395 };
113396
113397diff --git a/net/key/af_key.c b/net/key/af_key.c
113398index f8ac939..1e189bf 100644
113399--- a/net/key/af_key.c
113400+++ b/net/key/af_key.c
113401@@ -3049,10 +3049,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
113402 static u32 get_acqseq(void)
113403 {
113404 u32 res;
113405- static atomic_t acqseq;
113406+ static atomic_unchecked_t acqseq;
113407
113408 do {
113409- res = atomic_inc_return(&acqseq);
113410+ res = atomic_inc_return_unchecked(&acqseq);
113411 } while (!res);
113412 return res;
113413 }
113414diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
113415index 781b3a2..73a7434 100644
113416--- a/net/l2tp/l2tp_eth.c
113417+++ b/net/l2tp/l2tp_eth.c
113418@@ -42,12 +42,12 @@ struct l2tp_eth {
113419 struct sock *tunnel_sock;
113420 struct l2tp_session *session;
113421 struct list_head list;
113422- atomic_long_t tx_bytes;
113423- atomic_long_t tx_packets;
113424- atomic_long_t tx_dropped;
113425- atomic_long_t rx_bytes;
113426- atomic_long_t rx_packets;
113427- atomic_long_t rx_errors;
113428+ atomic_long_unchecked_t tx_bytes;
113429+ atomic_long_unchecked_t tx_packets;
113430+ atomic_long_unchecked_t tx_dropped;
113431+ atomic_long_unchecked_t rx_bytes;
113432+ atomic_long_unchecked_t rx_packets;
113433+ atomic_long_unchecked_t rx_errors;
113434 };
113435
113436 /* via l2tp_session_priv() */
113437@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
113438 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
113439
113440 if (likely(ret == NET_XMIT_SUCCESS)) {
113441- atomic_long_add(len, &priv->tx_bytes);
113442- atomic_long_inc(&priv->tx_packets);
113443+ atomic_long_add_unchecked(len, &priv->tx_bytes);
113444+ atomic_long_inc_unchecked(&priv->tx_packets);
113445 } else {
113446- atomic_long_inc(&priv->tx_dropped);
113447+ atomic_long_inc_unchecked(&priv->tx_dropped);
113448 }
113449 return NETDEV_TX_OK;
113450 }
113451@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
113452 {
113453 struct l2tp_eth *priv = netdev_priv(dev);
113454
113455- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
113456- stats->tx_packets = atomic_long_read(&priv->tx_packets);
113457- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
113458- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
113459- stats->rx_packets = atomic_long_read(&priv->rx_packets);
113460- stats->rx_errors = atomic_long_read(&priv->rx_errors);
113461+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
113462+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
113463+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
113464+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
113465+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
113466+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
113467 return stats;
113468 }
113469
113470@@ -167,15 +167,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
113471 nf_reset(skb);
113472
113473 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
113474- atomic_long_inc(&priv->rx_packets);
113475- atomic_long_add(data_len, &priv->rx_bytes);
113476+ atomic_long_inc_unchecked(&priv->rx_packets);
113477+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
113478 } else {
113479- atomic_long_inc(&priv->rx_errors);
113480+ atomic_long_inc_unchecked(&priv->rx_errors);
113481 }
113482 return;
113483
113484 error:
113485- atomic_long_inc(&priv->rx_errors);
113486+ atomic_long_inc_unchecked(&priv->rx_errors);
113487 kfree_skb(skb);
113488 }
113489
113490diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
113491index 05dfc8aa..df6cfd7 100644
113492--- a/net/l2tp/l2tp_ip.c
113493+++ b/net/l2tp/l2tp_ip.c
113494@@ -608,7 +608,7 @@ static struct inet_protosw l2tp_ip_protosw = {
113495 .ops = &l2tp_ip_ops,
113496 };
113497
113498-static struct net_protocol l2tp_ip_protocol __read_mostly = {
113499+static const struct net_protocol l2tp_ip_protocol = {
113500 .handler = l2tp_ip_recv,
113501 .netns_ok = 1,
113502 };
113503diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
113504index 8611f1b..bc60a2d 100644
113505--- a/net/l2tp/l2tp_ip6.c
113506+++ b/net/l2tp/l2tp_ip6.c
113507@@ -757,7 +757,7 @@ static struct inet_protosw l2tp_ip6_protosw = {
113508 .ops = &l2tp_ip6_ops,
113509 };
113510
113511-static struct inet6_protocol l2tp_ip6_protocol __read_mostly = {
113512+static const struct inet6_protocol l2tp_ip6_protocol = {
113513 .handler = l2tp_ip6_recv,
113514 };
113515
113516diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
113517index 1a3c7e0..80f8b0c 100644
113518--- a/net/llc/llc_proc.c
113519+++ b/net/llc/llc_proc.c
113520@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
113521 int rc = -ENOMEM;
113522 struct proc_dir_entry *p;
113523
113524- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
113525+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
113526 if (!llc_proc_dir)
113527 goto out;
113528
113529diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
113530index dd4ff36..3462997 100644
113531--- a/net/mac80211/cfg.c
113532+++ b/net/mac80211/cfg.c
113533@@ -581,7 +581,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
113534 ret = ieee80211_vif_use_channel(sdata, chandef,
113535 IEEE80211_CHANCTX_EXCLUSIVE);
113536 }
113537- } else if (local->open_count == local->monitors) {
113538+ } else if (local_read(&local->open_count) == local->monitors) {
113539 local->_oper_chandef = *chandef;
113540 ieee80211_hw_config(local, 0);
113541 }
113542@@ -3468,7 +3468,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
113543 else
113544 local->probe_req_reg--;
113545
113546- if (!local->open_count)
113547+ if (!local_read(&local->open_count))
113548 break;
113549
113550 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
113551@@ -3603,8 +3603,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
113552 if (chanctx_conf) {
113553 *chandef = sdata->vif.bss_conf.chandef;
113554 ret = 0;
113555- } else if (local->open_count > 0 &&
113556- local->open_count == local->monitors &&
113557+ } else if (local_read(&local->open_count) > 0 &&
113558+ local_read(&local->open_count) == local->monitors &&
113559 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
113560 if (local->use_chanctx)
113561 *chandef = local->monitor_chandef;
113562diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
113563index 81e8dc5..5b77c58 100644
113564--- a/net/mac80211/ieee80211_i.h
113565+++ b/net/mac80211/ieee80211_i.h
113566@@ -29,6 +29,7 @@
113567 #include <net/ieee80211_radiotap.h>
113568 #include <net/cfg80211.h>
113569 #include <net/mac80211.h>
113570+#include <asm/local.h>
113571 #include "key.h"
113572 #include "sta_info.h"
113573 #include "debug.h"
113574@@ -1129,7 +1130,7 @@ struct ieee80211_local {
113575 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
113576 spinlock_t queue_stop_reason_lock;
113577
113578- int open_count;
113579+ local_t open_count;
113580 int monitors, cooked_mntrs;
113581 /* number of interfaces with corresponding FIF_ flags */
113582 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
113583diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
113584index 81a2751..c06a026 100644
113585--- a/net/mac80211/iface.c
113586+++ b/net/mac80211/iface.c
113587@@ -544,7 +544,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
113588 break;
113589 }
113590
113591- if (local->open_count == 0) {
113592+ if (local_read(&local->open_count) == 0) {
113593 res = drv_start(local);
113594 if (res)
113595 goto err_del_bss;
113596@@ -591,7 +591,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
113597 res = drv_add_interface(local, sdata);
113598 if (res)
113599 goto err_stop;
113600- } else if (local->monitors == 0 && local->open_count == 0) {
113601+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
113602 res = ieee80211_add_virtual_monitor(local);
113603 if (res)
113604 goto err_stop;
113605@@ -701,7 +701,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
113606 atomic_inc(&local->iff_promiscs);
113607
113608 if (coming_up)
113609- local->open_count++;
113610+ local_inc(&local->open_count);
113611
113612 if (hw_reconf_flags)
113613 ieee80211_hw_config(local, hw_reconf_flags);
113614@@ -739,7 +739,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
113615 err_del_interface:
113616 drv_remove_interface(local, sdata);
113617 err_stop:
113618- if (!local->open_count)
113619+ if (!local_read(&local->open_count))
113620 drv_stop(local);
113621 err_del_bss:
113622 sdata->bss = NULL;
113623@@ -907,7 +907,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
113624 }
113625
113626 if (going_down)
113627- local->open_count--;
113628+ local_dec(&local->open_count);
113629
113630 switch (sdata->vif.type) {
113631 case NL80211_IFTYPE_AP_VLAN:
113632@@ -969,7 +969,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
113633 }
113634 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
113635
113636- if (local->open_count == 0)
113637+ if (local_read(&local->open_count) == 0)
113638 ieee80211_clear_tx_pending(local);
113639
113640 /*
113641@@ -1012,7 +1012,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
113642 if (cancel_scan)
113643 flush_delayed_work(&local->scan_work);
113644
113645- if (local->open_count == 0) {
113646+ if (local_read(&local->open_count) == 0) {
113647 ieee80211_stop_device(local);
113648
113649 /* no reconfiguring after stop! */
113650@@ -1023,7 +1023,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
113651 ieee80211_configure_filter(local);
113652 ieee80211_hw_config(local, hw_reconf_flags);
113653
113654- if (local->monitors == local->open_count)
113655+ if (local->monitors == local_read(&local->open_count))
113656 ieee80211_add_virtual_monitor(local);
113657 }
113658
113659diff --git a/net/mac80211/main.c b/net/mac80211/main.c
113660index 5e09d35..e2fdbe2 100644
113661--- a/net/mac80211/main.c
113662+++ b/net/mac80211/main.c
113663@@ -175,7 +175,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
113664 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
113665 IEEE80211_CONF_CHANGE_POWER);
113666
113667- if (changed && local->open_count) {
113668+ if (changed && local_read(&local->open_count)) {
113669 ret = drv_config(local, changed);
113670 /*
113671 * Goal:
113672diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
113673index ca405b6..6cc8bee 100644
113674--- a/net/mac80211/pm.c
113675+++ b/net/mac80211/pm.c
113676@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
113677 struct ieee80211_sub_if_data *sdata;
113678 struct sta_info *sta;
113679
113680- if (!local->open_count)
113681+ if (!local_read(&local->open_count))
113682 goto suspend;
113683
113684 ieee80211_scan_cancel(local);
113685@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
113686 cancel_work_sync(&local->dynamic_ps_enable_work);
113687 del_timer_sync(&local->dynamic_ps_timer);
113688
113689- local->wowlan = wowlan && local->open_count;
113690+ local->wowlan = wowlan && local_read(&local->open_count);
113691 if (local->wowlan) {
113692 int err = drv_suspend(local, wowlan);
113693 if (err < 0) {
113694@@ -126,7 +126,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
113695 WARN_ON(!list_empty(&local->chanctx_list));
113696
113697 /* stop hardware - this must stop RX */
113698- if (local->open_count)
113699+ if (local_read(&local->open_count))
113700 ieee80211_stop_device(local);
113701
113702 suspend:
113703diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
113704index d53355b..21f583a 100644
113705--- a/net/mac80211/rate.c
113706+++ b/net/mac80211/rate.c
113707@@ -724,7 +724,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
113708
113709 ASSERT_RTNL();
113710
113711- if (local->open_count)
113712+ if (local_read(&local->open_count))
113713 return -EBUSY;
113714
113715 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
113716diff --git a/net/mac80211/util.c b/net/mac80211/util.c
113717index 747bdcf..eb2b981 100644
113718--- a/net/mac80211/util.c
113719+++ b/net/mac80211/util.c
113720@@ -1741,7 +1741,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
113721 bool sched_scan_stopped = false;
113722
113723 /* nothing to do if HW shouldn't run */
113724- if (!local->open_count)
113725+ if (!local_read(&local->open_count))
113726 goto wake_up;
113727
113728 #ifdef CONFIG_PM
113729@@ -1993,7 +1993,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
113730 local->in_reconfig = false;
113731 barrier();
113732
113733- if (local->monitors == local->open_count && local->monitors > 0)
113734+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
113735 ieee80211_add_virtual_monitor(local);
113736
113737 /*
113738@@ -2048,7 +2048,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
113739 * If this is for hw restart things are still running.
113740 * We may want to change that later, however.
113741 */
113742- if (local->open_count && (!local->suspended || reconfig_due_to_wowlan))
113743+ if (local_read(&local->open_count) && (!local->suspended || reconfig_due_to_wowlan))
113744 drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
113745
113746 if (!local->suspended)
113747@@ -2072,7 +2072,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
113748 flush_delayed_work(&local->scan_work);
113749 }
113750
113751- if (local->open_count && !reconfig_due_to_wowlan)
113752+ if (local_read(&local->open_count) && !reconfig_due_to_wowlan)
113753 drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND);
113754
113755 list_for_each_entry(sdata, &local->interfaces, list) {
113756diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
113757index b02660f..c0f791c 100644
113758--- a/net/netfilter/Kconfig
113759+++ b/net/netfilter/Kconfig
113760@@ -1122,6 +1122,16 @@ config NETFILTER_XT_MATCH_ESP
113761
113762 To compile it as a module, choose M here. If unsure, say N.
113763
113764+config NETFILTER_XT_MATCH_GRADM
113765+ tristate '"gradm" match support'
113766+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
113767+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
113768+ ---help---
113769+ The gradm match allows to match on grsecurity RBAC being enabled.
113770+ It is useful when iptables rules are applied early on bootup to
113771+ prevent connections to the machine (except from a trusted host)
113772+ while the RBAC system is disabled.
113773+
113774 config NETFILTER_XT_MATCH_HASHLIMIT
113775 tristate '"hashlimit" match support'
113776 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
113777diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
113778index 89f73a9..e4e5bd9 100644
113779--- a/net/netfilter/Makefile
113780+++ b/net/netfilter/Makefile
113781@@ -139,6 +139,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
113782 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
113783 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
113784 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
113785+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
113786 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
113787 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
113788 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
113789diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
113790index d259da3..6a32b2c 100644
113791--- a/net/netfilter/ipset/ip_set_core.c
113792+++ b/net/netfilter/ipset/ip_set_core.c
113793@@ -1952,7 +1952,7 @@ done:
113794 return ret;
113795 }
113796
113797-static struct nf_sockopt_ops so_set __read_mostly = {
113798+static struct nf_sockopt_ops so_set = {
113799 .pf = PF_INET,
113800 .get_optmin = SO_IP_SET,
113801 .get_optmax = SO_IP_SET + 1,
113802diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
113803index b0f7b62..0541842 100644
113804--- a/net/netfilter/ipvs/ip_vs_conn.c
113805+++ b/net/netfilter/ipvs/ip_vs_conn.c
113806@@ -572,7 +572,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
113807 /* Increase the refcnt counter of the dest */
113808 ip_vs_dest_hold(dest);
113809
113810- conn_flags = atomic_read(&dest->conn_flags);
113811+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
113812 if (cp->protocol != IPPROTO_UDP)
113813 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
113814 flags = cp->flags;
113815@@ -922,7 +922,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
113816
113817 cp->control = NULL;
113818 atomic_set(&cp->n_control, 0);
113819- atomic_set(&cp->in_pkts, 0);
113820+ atomic_set_unchecked(&cp->in_pkts, 0);
113821
113822 cp->packet_xmit = NULL;
113823 cp->app = NULL;
113824@@ -1229,7 +1229,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
113825
113826 /* Don't drop the entry if its number of incoming packets is not
113827 located in [0, 8] */
113828- i = atomic_read(&cp->in_pkts);
113829+ i = atomic_read_unchecked(&cp->in_pkts);
113830 if (i > 8 || i < 0) return 0;
113831
113832 if (!todrop_rate[i]) return 0;
113833diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
113834index b87ca32..76c7799 100644
113835--- a/net/netfilter/ipvs/ip_vs_core.c
113836+++ b/net/netfilter/ipvs/ip_vs_core.c
113837@@ -568,7 +568,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
113838 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
113839 /* do not touch skb anymore */
113840
113841- atomic_inc(&cp->in_pkts);
113842+ atomic_inc_unchecked(&cp->in_pkts);
113843 ip_vs_conn_put(cp);
113844 return ret;
113845 }
113846@@ -1723,7 +1723,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
113847 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
113848 pkts = sysctl_sync_threshold(ipvs);
113849 else
113850- pkts = atomic_add_return(1, &cp->in_pkts);
113851+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
113852
113853 if (ipvs->sync_state & IP_VS_STATE_MASTER)
113854 ip_vs_sync_conn(net, cp, pkts);
113855diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
113856index ed99448..3ba6cad 100644
113857--- a/net/netfilter/ipvs/ip_vs_ctl.c
113858+++ b/net/netfilter/ipvs/ip_vs_ctl.c
113859@@ -799,7 +799,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
113860 */
113861 ip_vs_rs_hash(ipvs, dest);
113862 }
113863- atomic_set(&dest->conn_flags, conn_flags);
113864+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
113865
113866 /* bind the service */
113867 old_svc = rcu_dereference_protected(dest->svc, 1);
113868@@ -1664,7 +1664,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
113869 * align with netns init in ip_vs_control_net_init()
113870 */
113871
113872-static struct ctl_table vs_vars[] = {
113873+static ctl_table_no_const vs_vars[] __read_only = {
113874 {
113875 .procname = "amemthresh",
113876 .maxlen = sizeof(int),
113877@@ -1999,7 +1999,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
113878 " %-7s %-6d %-10d %-10d\n",
113879 &dest->addr.in6,
113880 ntohs(dest->port),
113881- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
113882+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
113883 atomic_read(&dest->weight),
113884 atomic_read(&dest->activeconns),
113885 atomic_read(&dest->inactconns));
113886@@ -2010,7 +2010,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
113887 "%-7s %-6d %-10d %-10d\n",
113888 ntohl(dest->addr.ip),
113889 ntohs(dest->port),
113890- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
113891+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
113892 atomic_read(&dest->weight),
113893 atomic_read(&dest->activeconns),
113894 atomic_read(&dest->inactconns));
113895@@ -2499,7 +2499,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
113896
113897 entry.addr = dest->addr.ip;
113898 entry.port = dest->port;
113899- entry.conn_flags = atomic_read(&dest->conn_flags);
113900+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
113901 entry.weight = atomic_read(&dest->weight);
113902 entry.u_threshold = dest->u_threshold;
113903 entry.l_threshold = dest->l_threshold;
113904@@ -3040,7 +3040,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
113905 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
113906 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
113907 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
113908- (atomic_read(&dest->conn_flags) &
113909+ (atomic_read_unchecked(&dest->conn_flags) &
113910 IP_VS_CONN_F_FWD_MASK)) ||
113911 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
113912 atomic_read(&dest->weight)) ||
113913@@ -3675,7 +3675,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
113914 {
113915 int idx;
113916 struct netns_ipvs *ipvs = net_ipvs(net);
113917- struct ctl_table *tbl;
113918+ ctl_table_no_const *tbl;
113919
113920 atomic_set(&ipvs->dropentry, 0);
113921 spin_lock_init(&ipvs->dropentry_lock);
113922diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
113923index 127f140..553d652 100644
113924--- a/net/netfilter/ipvs/ip_vs_lblc.c
113925+++ b/net/netfilter/ipvs/ip_vs_lblc.c
113926@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
113927 * IPVS LBLC sysctl table
113928 */
113929 #ifdef CONFIG_SYSCTL
113930-static struct ctl_table vs_vars_table[] = {
113931+static ctl_table_no_const vs_vars_table[] __read_only = {
113932 {
113933 .procname = "lblc_expiration",
113934 .data = NULL,
113935diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
113936index 2229d2d..b32b785 100644
113937--- a/net/netfilter/ipvs/ip_vs_lblcr.c
113938+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
113939@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
113940 * IPVS LBLCR sysctl table
113941 */
113942
113943-static struct ctl_table vs_vars_table[] = {
113944+static ctl_table_no_const vs_vars_table[] __read_only = {
113945 {
113946 .procname = "lblcr_expiration",
113947 .data = NULL,
113948diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
113949index d93ceeb..4556144 100644
113950--- a/net/netfilter/ipvs/ip_vs_sync.c
113951+++ b/net/netfilter/ipvs/ip_vs_sync.c
113952@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
113953 cp = cp->control;
113954 if (cp) {
113955 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
113956- pkts = atomic_add_return(1, &cp->in_pkts);
113957+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
113958 else
113959 pkts = sysctl_sync_threshold(ipvs);
113960 ip_vs_sync_conn(net, cp->control, pkts);
113961@@ -771,7 +771,7 @@ control:
113962 if (!cp)
113963 return;
113964 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
113965- pkts = atomic_add_return(1, &cp->in_pkts);
113966+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
113967 else
113968 pkts = sysctl_sync_threshold(ipvs);
113969 goto sloop;
113970@@ -902,7 +902,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
113971
113972 if (opt)
113973 memcpy(&cp->in_seq, opt, sizeof(*opt));
113974- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
113975+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
113976 cp->state = state;
113977 cp->old_state = cp->state;
113978 /*
113979diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
113980index 3aedbda..6a63567 100644
113981--- a/net/netfilter/ipvs/ip_vs_xmit.c
113982+++ b/net/netfilter/ipvs/ip_vs_xmit.c
113983@@ -1214,7 +1214,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
113984 else
113985 rc = NF_ACCEPT;
113986 /* do not touch skb anymore */
113987- atomic_inc(&cp->in_pkts);
113988+ atomic_inc_unchecked(&cp->in_pkts);
113989 goto out;
113990 }
113991
113992@@ -1307,7 +1307,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
113993 else
113994 rc = NF_ACCEPT;
113995 /* do not touch skb anymore */
113996- atomic_inc(&cp->in_pkts);
113997+ atomic_inc_unchecked(&cp->in_pkts);
113998 goto out;
113999 }
114000
114001diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
114002index a4b5e2a..13b1de3 100644
114003--- a/net/netfilter/nf_conntrack_acct.c
114004+++ b/net/netfilter/nf_conntrack_acct.c
114005@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
114006 #ifdef CONFIG_SYSCTL
114007 static int nf_conntrack_acct_init_sysctl(struct net *net)
114008 {
114009- struct ctl_table *table;
114010+ ctl_table_no_const *table;
114011
114012 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
114013 GFP_KERNEL);
114014diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
114015index 13fad86..18c984c 100644
114016--- a/net/netfilter/nf_conntrack_core.c
114017+++ b/net/netfilter/nf_conntrack_core.c
114018@@ -1733,6 +1733,10 @@ void nf_conntrack_init_end(void)
114019 #define DYING_NULLS_VAL ((1<<30)+1)
114020 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
114021
114022+#ifdef CONFIG_GRKERNSEC_HIDESYM
114023+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
114024+#endif
114025+
114026 int nf_conntrack_init_net(struct net *net)
114027 {
114028 int ret = -ENOMEM;
114029@@ -1758,7 +1762,11 @@ int nf_conntrack_init_net(struct net *net)
114030 if (!net->ct.stat)
114031 goto err_pcpu_lists;
114032
114033+#ifdef CONFIG_GRKERNSEC_HIDESYM
114034+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
114035+#else
114036 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
114037+#endif
114038 if (!net->ct.slabname)
114039 goto err_slabname;
114040
114041diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
114042index 4e78c57..ec8fb74 100644
114043--- a/net/netfilter/nf_conntrack_ecache.c
114044+++ b/net/netfilter/nf_conntrack_ecache.c
114045@@ -264,7 +264,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
114046 #ifdef CONFIG_SYSCTL
114047 static int nf_conntrack_event_init_sysctl(struct net *net)
114048 {
114049- struct ctl_table *table;
114050+ ctl_table_no_const *table;
114051
114052 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
114053 GFP_KERNEL);
114054diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
114055index bd9d315..989947e 100644
114056--- a/net/netfilter/nf_conntrack_helper.c
114057+++ b/net/netfilter/nf_conntrack_helper.c
114058@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
114059
114060 static int nf_conntrack_helper_init_sysctl(struct net *net)
114061 {
114062- struct ctl_table *table;
114063+ ctl_table_no_const *table;
114064
114065 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
114066 GFP_KERNEL);
114067diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
114068index b65d586..beec902 100644
114069--- a/net/netfilter/nf_conntrack_proto.c
114070+++ b/net/netfilter/nf_conntrack_proto.c
114071@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
114072
114073 static void
114074 nf_ct_unregister_sysctl(struct ctl_table_header **header,
114075- struct ctl_table **table,
114076+ ctl_table_no_const **table,
114077 unsigned int users)
114078 {
114079 if (users > 0)
114080diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
114081index fc823fa..8311af3 100644
114082--- a/net/netfilter/nf_conntrack_standalone.c
114083+++ b/net/netfilter/nf_conntrack_standalone.c
114084@@ -468,7 +468,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
114085
114086 static int nf_conntrack_standalone_init_sysctl(struct net *net)
114087 {
114088- struct ctl_table *table;
114089+ ctl_table_no_const *table;
114090
114091 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
114092 GFP_KERNEL);
114093diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
114094index 7a394df..bd91a8a 100644
114095--- a/net/netfilter/nf_conntrack_timestamp.c
114096+++ b/net/netfilter/nf_conntrack_timestamp.c
114097@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
114098 #ifdef CONFIG_SYSCTL
114099 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
114100 {
114101- struct ctl_table *table;
114102+ ctl_table_no_const *table;
114103
114104 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
114105 GFP_KERNEL);
114106diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
114107index 675d12c..b36e825 100644
114108--- a/net/netfilter/nf_log.c
114109+++ b/net/netfilter/nf_log.c
114110@@ -386,7 +386,7 @@ static const struct file_operations nflog_file_ops = {
114111
114112 #ifdef CONFIG_SYSCTL
114113 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
114114-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
114115+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
114116
114117 static int nf_log_proc_dostring(struct ctl_table *table, int write,
114118 void __user *buffer, size_t *lenp, loff_t *ppos)
114119@@ -417,13 +417,15 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
114120 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
114121 mutex_unlock(&nf_log_mutex);
114122 } else {
114123+ ctl_table_no_const nf_log_table = *table;
114124+
114125 mutex_lock(&nf_log_mutex);
114126 logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
114127 if (!logger)
114128- table->data = "NONE";
114129+ nf_log_table.data = "NONE";
114130 else
114131- table->data = logger->name;
114132- r = proc_dostring(table, write, buffer, lenp, ppos);
114133+ nf_log_table.data = logger->name;
114134+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
114135 mutex_unlock(&nf_log_mutex);
114136 }
114137
114138diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
114139index c68c1e5..8b5d670 100644
114140--- a/net/netfilter/nf_sockopt.c
114141+++ b/net/netfilter/nf_sockopt.c
114142@@ -43,7 +43,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
114143 }
114144 }
114145
114146- list_add(&reg->list, &nf_sockopts);
114147+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
114148 out:
114149 mutex_unlock(&nf_sockopt_mutex);
114150 return ret;
114151@@ -53,7 +53,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
114152 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
114153 {
114154 mutex_lock(&nf_sockopt_mutex);
114155- list_del(&reg->list);
114156+ pax_list_del((struct list_head *)&reg->list);
114157 mutex_unlock(&nf_sockopt_mutex);
114158 }
114159 EXPORT_SYMBOL(nf_unregister_sockopt);
114160diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
114161index 11d85b3..7fcc420 100644
114162--- a/net/netfilter/nfnetlink_log.c
114163+++ b/net/netfilter/nfnetlink_log.c
114164@@ -83,7 +83,7 @@ static int nfnl_log_net_id __read_mostly;
114165 struct nfnl_log_net {
114166 spinlock_t instances_lock;
114167 struct hlist_head instance_table[INSTANCE_BUCKETS];
114168- atomic_t global_seq;
114169+ atomic_unchecked_t global_seq;
114170 };
114171
114172 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
114173@@ -563,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
114174 /* global sequence number */
114175 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
114176 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
114177- htonl(atomic_inc_return(&log->global_seq))))
114178+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
114179 goto nla_put_failure;
114180
114181 if (data_len) {
114182diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
114183index 65f3e2b..2e9d6a0 100644
114184--- a/net/netfilter/nft_compat.c
114185+++ b/net/netfilter/nft_compat.c
114186@@ -317,14 +317,7 @@ static void nft_match_eval(const struct nft_expr *expr,
114187 return;
114188 }
114189
114190- switch(ret) {
114191- case true:
114192- data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
114193- break;
114194- case false:
114195- data[NFT_REG_VERDICT].verdict = NFT_BREAK;
114196- break;
114197- }
114198+ data[NFT_REG_VERDICT].verdict = ret ? NFT_CONTINUE : NFT_BREAK;
114199 }
114200
114201 static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
114202diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
114203new file mode 100644
114204index 0000000..c566332
114205--- /dev/null
114206+++ b/net/netfilter/xt_gradm.c
114207@@ -0,0 +1,51 @@
114208+/*
114209+ * gradm match for netfilter
114210